diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 6cf0e9f02f..0ec355aa3c 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -96,6 +96,8 @@ jobs: os: ubuntu-latest - tuple: loongarch64-unknown-linux-gnu os: ubuntu-latest + - tuple: hexagon-unknown-linux-musl + os: ubuntu-latest - tuple: wasm32-wasip1 os: ubuntu-latest @@ -207,6 +209,11 @@ jobs: tuple: amdgcn-amd-amdhsa os: ubuntu-latest norun: true + - target: + tuple: hexagon-unknown-linux-musl + os: ubuntu-latest + norun: true + build_std: true steps: - uses: actions/checkout@v4 @@ -300,7 +307,7 @@ jobs: # Check that the generated files agree with the checked-in versions. check-stdarch-gen: needs: [style] - name: Check stdarch-gen-{arm, loongarch} output + name: Check stdarch-gen-{arm, loongarch, hexagon} output runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 @@ -318,6 +325,10 @@ jobs: run: | cargo run --bin=stdarch-gen-loongarch --release -- crates/stdarch-gen-loongarch/lasx.spec git diff --exit-code + - name: Check hexagon + run: | + cargo run -p stdarch-gen-hexagon --release + git diff --exit-code conclusion: needs: diff --git a/Cargo.lock b/Cargo.lock index 70f09adf2c..7e7cb59288 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4,18 +4,18 @@ version = 4 [[package]] name = "aho-corasick" -version = "1.1.3" +version = "1.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" +checksum = "ddd31a130427c27518df266943a5308ed92d4b226cc639f5a8f1002816174301" dependencies = [ "memchr", ] [[package]] name = "anstream" -version = "0.6.20" +version = "0.6.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3ae563653d1938f79b1ab1b5e668c87c76a9930414574a6583a7b7e11a8e6192" +checksum = "43d5b281e737544384e969a5ccad3f1cdd24b48086a0fc1b2a5262a26b8f4f4a" dependencies = [ "anstyle", "anstyle-parse", @@ -28,9 +28,9 @@ dependencies = [ [[package]] name = 
"anstyle" -version = "1.0.11" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "862ed96ca487e809f1c8e5a8447f6ee2cf102f846893800b20cebdf541fc6bbd" +checksum = "5192cca8006f1fd4f7237516f40fa183bb07f8fbdfedaa0036de5ea9b0b45e78" [[package]] name = "anstyle-parse" @@ -43,29 +43,29 @@ dependencies = [ [[package]] name = "anstyle-query" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9e231f6134f61b71076a3eab506c379d4f36122f2af15a9ff04415ea4c3339e2" +checksum = "40c48f72fd53cd289104fc64099abca73db4166ad86ea0b4341abe65af83dadc" dependencies = [ - "windows-sys 0.60.2", + "windows-sys", ] [[package]] name = "anstyle-wincon" -version = "3.0.10" +version = "3.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3e0633414522a32ffaac8ac6cc8f748e090c5717661fddeea04219e2344f5f2a" +checksum = "291e6a250ff86cd4a820112fb8898808a366d8f9f58ce16d1f538353ad55747d" dependencies = [ "anstyle", "once_cell_polyfill", - "windows-sys 0.60.2", + "windows-sys", ] [[package]] name = "anyhow" -version = "1.0.99" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0674a1ddeecb70197781e945de4b3b8ffb61fa939a5597bcf48503737663100" +checksum = "5f0e0fee31ef5ed1ba1316088939cea399010ed7731dba877ed44aeb407a75ea" [[package]] name = "assert-instr-macro" @@ -84,15 +84,15 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "bitflags" -version = "2.9.4" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" +checksum = "812e12b5285cc515a9c72a5c1d3b6d46a19dac5acfef5265968c166106e31dd3" [[package]] name = "cc" -version = "1.2.36" +version = "1.2.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"5252b3d2648e5eedbc1a6f501e3c795e07025c1e93bbf8bbdd6eef7f447a6d54" +checksum = "47b26a0954ae34af09b50f0de26458fa95369a0d478d8236d3f93082b219bd29" dependencies = [ "find-msvc-tools", "shlex", @@ -100,15 +100,15 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" +checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" [[package]] name = "clap" -version = "4.5.47" +version = "4.5.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" +checksum = "63be97961acde393029492ce0be7a1af7e323e6bae9511ebfac33751be5e6806" dependencies = [ "clap_builder", "clap_derive", @@ -116,9 +116,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.47" +version = "4.5.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" +checksum = "7f13174bda5dfd69d7e947827e5af4b0f2f94a4a3ee92912fba07a66150f21e2" dependencies = [ "anstream", "anstyle", @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.55" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "a92793da1a46a5f2a02a6f4c46c6496b28c43638adea8306fcb0caa1634f24e5" dependencies = [ "heck", "proc-macro2", @@ -140,9 +140,9 @@ dependencies = [ [[package]] name = "clap_lex" -version = "0.7.5" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b94f61472cee1439c0b966b47e3aca9ae07e45d070759512cd390ea2bebc6675" +checksum = "3a822ea5bc7590f9d40f1ba12c0dc3c2760f3482c6984db1573ad11031420831" [[package]] name = "colorchoice" @@ -185,9 +185,9 @@ checksum = 
"d0a5c400df2834b80a4c3327b3aad3a4c4cd4de0629063962b03235697506a28" [[package]] name = "darling" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" dependencies = [ "darling_core", "darling_macro", @@ -195,9 +195,9 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0d00b9596d185e565c2207a0b01f8bd1a135483d02d9b7b0a54b11da8d53412e" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" dependencies = [ "fnv", "ident_case", @@ -209,9 +209,9 @@ dependencies = [ [[package]] name = "darling_macro" -version = "0.20.11" +version = "0.21.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core", "quote", @@ -231,10 +231,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" [[package]] -name = "env_logger" -version = "0.8.4" +name = "env_filter" +version = "1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a19187fea3ac7e84da7dacf48de0c45d63c6a76f9490dae389aead16c243fce3" +checksum = "7a1c3cc8e57274ec99de65301228b537f1e4eedc1b8e0f9411c6caac8ae7308f" dependencies = [ "log", "regex", @@ -253,6 +253,16 @@ dependencies = [ "termcolor", ] +[[package]] +name = "env_logger" +version = "0.11.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b2daee4ea451f429a58296525ddf28b45a3b64f1acf6587e2067437bb11e218d" +dependencies = [ + "env_filter", + "log", +] + [[package]] name = 
"equivalent" version = "1.0.2" @@ -261,9 +271,9 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "find-msvc-tools" -version = "0.1.1" +version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fd99930f64d146689264c637b5af2f0233a933bef0d8570e2526bf9e083192d" +checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" [[package]] name = "fnv" @@ -271,17 +281,37 @@ version = "1.0.7" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1" +[[package]] +name = "foldhash" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" + [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "libc", "wasi", ] +[[package]] +name = "getrandom" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "139ef39800118c7683f2fd3c98c1b23c09ae076556b435f8e9064ae108aaeeec" +dependencies = [ + "cfg-if", + "libc", + "r-efi", + "rand_core 0.10.0", + "wasip2", + "wasip3", +] + [[package]] name = "hashbrown" version = "0.12.3" @@ -293,6 +323,15 @@ name = "hashbrown" version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" +dependencies = [ + "foldhash", +] + +[[package]] +name = "hashbrown" +version = "0.16.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" [[package]] name = "heck" @@ -308,9 
+347,15 @@ checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "humantime" -version = "2.2.0" +version = "2.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b112acc8b3adf4b107a8ec20977da0273a8c386765a3ec0229bd500a1443f9f" +checksum = "135b12329e5e3ce057a9f972339ea52bc954fe1e9358ef27f95e89716fbc5424" + +[[package]] +name = "id-arena" +version = "2.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d3067d79b975e8844ca9eb072e16b31c3c1c36928edf9c6789548c524d0d954" [[package]] name = "ident_case" @@ -330,12 +375,14 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.11.0" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2481980430f9f78649238835720ddccc57e52df14ffce1c6f37391d61b563e9" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", - "hashbrown 0.15.5", + "hashbrown 0.16.1", + "serde", + "serde_core", ] [[package]] @@ -357,20 +404,20 @@ dependencies = [ [[package]] name = "is-terminal" -version = "0.4.16" +version = "0.4.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e04d7f318608d35d4b61ddd75cbdaee86b023ebe2bd5a66ee0915f0bf93095a9" +checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" dependencies = [ "hermit-abi", "libc", - "windows-sys 0.59.0", + "windows-sys", ] [[package]] name = "is_terminal_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7943c866cc5cd64cbc25b2e01621d07fa8eb2a1a23160ee81ce38704e97b8ecf" +checksum = "a6cb138bb79a146c1bd460005623e142ef0181e3d0219cb493e02f7d08a35695" [[package]] name = "itertools" @@ -383,15 +430,21 @@ dependencies = [ [[package]] name = "itoa" -version = "1.0.15" +version = "1.0.17" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2" + +[[package]] +name = "leb128fmt" +version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" +checksum = "09edd9e8b54e49e587e4f6295a7d29c3ea94d469cb40ab8ca70b288248a81db2" [[package]] name = "libc" -version = "0.2.175" +version = "0.2.181" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a82ae493e598baaea5209805c49bbf2ea7de956d50d7da0da1164f9c6d28543" +checksum = "459427e2af2b9c839b132acb702a1c654d95e10f8c326bfc2ad11310e458b1c5" [[package]] name = "linked-hash-map" @@ -401,21 +454,21 @@ checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "memchr" -version = "2.7.6" +version = "2.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" +checksum = "f8ca58f447f06ed17d5fc4043ce1b10dd205e060fb3ce5b979b8ed8e59ff3f79" [[package]] name = "once_cell_polyfill" -version = "1.70.1" +version = "1.70.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a4895175b425cb1f87721b59f0f286c2092bd4af812243672510e1ac53e2e0ad" +checksum = "384b8ab6d37215f3c5301a95a4accb5d64aa607f1fcb26a11b5303878451b4fe" [[package]] name = "ppv-lite86" @@ -436,11 +489,21 @@ dependencies = [ "log", ] +[[package]] +name = "prettyplease" +version = "0.2.37" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" +dependencies = [ + "proc-macro2", + "syn", +] + [[package]] name 
= "proc-macro2" -version = "1.0.101" +version = "1.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" +checksum = "8fd00f0bb2e90d81d1044c2b32617f68fcb9fa3bb7640c23e9c748e53fb30934" dependencies = [ "unicode-ident", ] @@ -467,24 +530,30 @@ dependencies = [ [[package]] name = "quickcheck" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "588f6378e4dd99458b60ec275b4477add41ce4fa9f64dcba6f15adccb19b50d6" +checksum = "95c589f335db0f6aaa168a7cd27b1fc6920f5e1470c804f814d9cd6e62a0f70b" dependencies = [ - "env_logger 0.8.4", + "env_logger 0.11.9", "log", - "rand", + "rand 0.10.0", ] [[package]] name = "quote" -version = "1.0.40" +version = "1.0.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1885c039570dc00dcb4ff087a89e185fd56bae234ddc7f056a945bf36467248d" +checksum = "21b2ebcf727b7760c461f091f9f0f539b77b8e87f2fd88131e7f1b433b3cece4" dependencies = [ "proc-macro2", ] +[[package]] +name = "r-efi" +version = "5.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" + [[package]] name = "rand" version = "0.8.5" @@ -493,7 +562,17 @@ checksum = "34af8d1a0e25924bc5b7c43c079c942339d8f0a8b57c39049bef581b46327404" dependencies = [ "libc", "rand_chacha", - "rand_core", + "rand_core 0.6.4", +] + +[[package]] +name = "rand" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bc266eb313df6c5c09c1c7b1fbe2510961e5bcd3add930c1e31f7ed9da0feff8" +dependencies = [ + "getrandom 0.4.1", + "rand_core 0.10.0", ] [[package]] @@ -503,7 +582,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e6c10a63a0fa32252be49d21e7709d4d4baf8d231c2dbce1eaa8141b9b127d88" dependencies = [ "ppv-lite86", - "rand_core", + "rand_core 0.6.4", ] 
[[package]] @@ -512,9 +591,15 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom", + "getrandom 0.2.17", ] +[[package]] +name = "rand_core" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0c8d0fd677905edcbeedbf2edb6494d676f0e98d54d5cf9bda0b061cb8fb8aba" + [[package]] name = "rayon" version = "1.11.0" @@ -537,9 +622,9 @@ dependencies = [ [[package]] name = "regex" -version = "1.11.2" +version = "1.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23d7fd106d8c02486a8d64e778353d1cffe08ce79ac2e82f540c86d0facf6912" +checksum = "e10754a14b9137dd7b1e3e5b0493cc9171fdd105e0ab477f51b72e7f3ac0e276" dependencies = [ "aho-corasick", "memchr", @@ -549,9 +634,9 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.10" +version = "0.4.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9458fa0bfeeac22b5ca447c63aaf45f28439a709ccd244698632f9aa6394d6" +checksum = "6e1dd4122fc1595e8162618945476892eefca7b88c52820e74af6262213cae8f" dependencies = [ "aho-corasick", "memchr", @@ -560,21 +645,21 @@ dependencies = [ [[package]] name = "regex-syntax" -version = "0.8.6" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" +checksum = "a96887878f22d7bad8a3b6dc5b7440e0ada9a245242924394987b21cf2210a4c" [[package]] name = "rustc-demangle" -version = "0.1.26" +version = "0.1.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" +checksum = "b50b8869d9fc858ce7266cce0194bd74df58b9d0e3f6df3a9fc8eb470d95c09d" [[package]] name = "ryu" -version = "1.0.20" +version = "1.0.23" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +checksum = "9774ba4a74de5f7b1c1451ed6cd5285a32eddb5cccb8cc655a4e50009e06477f" [[package]] name = "same-file" @@ -587,36 +672,46 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "9a8e94ea7f378bd32cbbd37198a4a91436180c5bb472411e48b5ec2e2124ae9e" dependencies = [ + "serde_core", "serde_derive", ] [[package]] name = "serde-xml-rs" -version = "0.8.1" +version = "0.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53630160a98edebde0123eb4dfd0fce6adff091b2305db3154a9e920206eb510" +checksum = "cc2215ce3e6a77550b80a1c37251b7d294febaf42e36e21b7b411e0bf54d540d" dependencies = [ "log", "serde", "thiserror", - "xml-rs", + "xml", +] + +[[package]] +name = "serde_core" +version = "1.0.228" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41d385c7d4ca58e59fc732af25c3983b67ac852c1a25000afe1175de458b67ad" +dependencies = [ + "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.228" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", @@ -625,32 +720,32 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.143" +version = "1.0.149" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "d401abef1d108fbd9cbaebc3e46611f4b1021f714a0597a71f41ee463f5f4a5a" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", - "ryu", "serde", + "serde_core", + "zmij", ] [[package]] name = "serde_with" -version = "3.14.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" +checksum = "4fa237f2807440d238e0364a218270b98f767a00d3dada77b1c53ae88940e2e7" dependencies = [ - "serde", - "serde_derive", + "serde_core", "serde_with_macros", ] [[package]] name = "serde_with_macros" -version = "3.14.0" +version = "3.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" +checksum = "52a8e3ca0ca629121f70ab50f95249e5a6f925cc0f6ffe8256c45b728875706c" dependencies = [ "darling", "proc-macro2", @@ -699,11 +794,18 @@ dependencies = [ "walkdir", ] +[[package]] +name = "stdarch-gen-hexagon" +version = "0.1.0" +dependencies = [ + "regex", +] + [[package]] name = "stdarch-gen-loongarch" version = "0.1.0" dependencies = [ - "rand", + "rand 0.8.5", ] [[package]] @@ -736,7 +838,7 @@ version = "0.0.0" dependencies = [ "core_arch", "quickcheck", - "rand", + "rand 0.8.5", ] [[package]] @@ -747,9 +849,9 @@ checksum = "7da8b5736845d9f2fcb837ea5d9e2628564b3b043a70948a3f0b778838c5fb4f" [[package]] name = "syn" -version = "2.0.106" +version = "2.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" +checksum = "6e614ed320ac28113fa64972c4262d5dbc89deacdfd00c34a3e4cea073243c12" dependencies = [ "proc-macro2", "quote", @@ -773,18 +875,18 @@ dependencies = [ [[package]] name = "thiserror" -version = "1.0.69" +version = "2.0.18" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b6aaf5339b578ea85b50e080feb250a3e8ae8cfcdff9a461c9ec2904bc923f52" +checksum = "4288b5bcbc7920c07a1149a35cf9590a2aa808e0bc1eafaade0b80947865fbc4" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.69" +version = "2.0.18" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" +checksum = "ebc4ee7f67670e9b64d05fa4253e753e016c6c95ff35b89b7941d6b856dec1d5" dependencies = [ "proc-macro2", "quote", @@ -793,9 +895,15 @@ dependencies = [ [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "537dd038a89878be9b64dd4bd1b260315c1bb94f4d784956b81e27a088d9a09e" + +[[package]] +name = "unicode-xid" +version = "0.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "ebc1c04c71510c7f702b52b7c350734c9ff1295c464a03335b00bb84fc54f853" [[package]] name = "utf8parse" @@ -820,194 +928,196 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] -name = "wasmparser" -version = "0.235.0" +name = "wasip2" +version = "1.0.2+wasi-0.2.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" +checksum = "9517f9239f02c069db75e65f174b3da828fe5f5b945c4dd26bd25d89c03ebcf5" dependencies = [ - "bitflags", - "indexmap 2.11.0", - "semver", + "wit-bindgen", ] [[package]] -name = "wasmprinter" -version = "0.235.0" +name = "wasip3" +version = "0.4.0+wasi-0.3.0-rc-2026-01-06" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75aa8e9076de6b9544e6dab4badada518cca0bf4966d35b131bbd057aed8fa0a" 
+checksum = "5428f8bf88ea5ddc08faddef2ac4a67e390b88186c703ce6dbd955e1c145aca5" dependencies = [ - "anyhow", - "termcolor", - "wasmparser", + "wit-bindgen", ] [[package]] -name = "winapi-util" -version = "0.1.10" +name = "wasm-encoder" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" +checksum = "990065f2fe63003fe337b932cfb5e3b80e0b4d0f5ff650e6985b1048f62c8319" dependencies = [ - "windows-sys 0.60.2", + "leb128fmt", + "wasmparser 0.244.0", ] [[package]] -name = "windows-link" -version = "0.1.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e6ad25900d524eaabdbbb96d20b4311e1e7ae1699af4fb28c17ae66c80d798a" - -[[package]] -name = "windows-sys" -version = "0.59.0" +name = "wasm-metadata" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e38bc4d79ed67fd075bcc251a1c39b32a1776bbe92e5bef1f0bf1f8c531853b" +checksum = "bb0e353e6a2fbdc176932bbaab493762eb1255a7900fe0fea1a2f96c296cc909" dependencies = [ - "windows-targets 0.52.6", + "anyhow", + "indexmap 2.13.0", + "wasm-encoder", + "wasmparser 0.244.0", ] [[package]] -name = "windows-sys" -version = "0.60.2" +name = "wasmparser" +version = "0.235.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +checksum = "161296c618fa2d63f6ed5fffd1112937e803cb9ec71b32b01a76321555660917" dependencies = [ - "windows-targets 0.53.3", + "bitflags", + "indexmap 2.13.0", + "semver", ] [[package]] -name = "windows-targets" -version = "0.52.6" +name = "wasmparser" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9b724f72796e036ab90c1021d4780d4d3d648aca59e491e6b98e725b84e99973" +checksum = "47b807c72e1bac69382b3a6fb3dbe8ea4c0ed87ff5629b8685ae6b9a611028fe" dependencies = [ - "windows_aarch64_gnullvm 0.52.6", - 
"windows_aarch64_msvc 0.52.6", - "windows_i686_gnu 0.52.6", - "windows_i686_gnullvm 0.52.6", - "windows_i686_msvc 0.52.6", - "windows_x86_64_gnu 0.52.6", - "windows_x86_64_gnullvm 0.52.6", - "windows_x86_64_msvc 0.52.6", + "bitflags", + "hashbrown 0.15.5", + "indexmap 2.13.0", + "semver", ] [[package]] -name = "windows-targets" -version = "0.53.3" +name = "wasmprinter" +version = "0.235.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d5fe6031c4041849d7c496a8ded650796e7b6ecc19df1a431c1a363342e5dc91" +checksum = "75aa8e9076de6b9544e6dab4badada518cca0bf4966d35b131bbd057aed8fa0a" dependencies = [ - "windows-link", - "windows_aarch64_gnullvm 0.53.0", - "windows_aarch64_msvc 0.53.0", - "windows_i686_gnu 0.53.0", - "windows_i686_gnullvm 0.53.0", - "windows_i686_msvc 0.53.0", - "windows_x86_64_gnu 0.53.0", - "windows_x86_64_gnullvm 0.53.0", - "windows_x86_64_msvc 0.53.0", + "anyhow", + "termcolor", + "wasmparser 0.235.0", ] [[package]] -name = "windows_aarch64_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3" - -[[package]] -name = "windows_aarch64_gnullvm" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469" - -[[package]] -name = "windows_aarch64_msvc" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c" - -[[package]] -name = "windows_i686_gnu" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b" - -[[package]] -name = "windows_i686_gnu" -version = "0.53.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.52.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66" - -[[package]] -name = "windows_i686_gnullvm" -version = "0.53.0" +name = "winapi-util" +version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11" +checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" +dependencies = [ + "windows-sys", +] [[package]] -name = "windows_i686_msvc" -version = "0.52.6" +name = "windows-link" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66" +checksum = "f0805222e57f7521d6a62e36fa9163bc891acd422f971defe97d64e70d0a4fe5" [[package]] -name = "windows_i686_msvc" -version = "0.53.0" +name = "windows-sys" +version = "0.61.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d" +checksum = "ae137229bcbd6cdf0f7b80a31df61766145077ddf49416a728b02cb3921ff3fc" +dependencies = [ + "windows-link", +] [[package]] -name = "windows_x86_64_gnu" -version = "0.52.6" +name = "wit-bindgen" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78" +checksum = "d7249219f66ced02969388cf2bb044a09756a083d0fab1e566056b04d9fbcaa5" +dependencies = [ + "wit-bindgen-rust-macro", +] [[package]] -name = "windows_x86_64_gnu" -version = "0.53.0" +name = 
"wit-bindgen-core" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba" +checksum = "ea61de684c3ea68cb082b7a88508a8b27fcc8b797d738bfc99a82facf1d752dc" +dependencies = [ + "anyhow", + "heck", + "wit-parser", +] [[package]] -name = "windows_x86_64_gnullvm" -version = "0.52.6" +name = "wit-bindgen-rust" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d" +checksum = "b7c566e0f4b284dd6561c786d9cb0142da491f46a9fbed79ea69cdad5db17f21" +dependencies = [ + "anyhow", + "heck", + "indexmap 2.13.0", + "prettyplease", + "syn", + "wasm-metadata", + "wit-bindgen-core", + "wit-component", +] [[package]] -name = "windows_x86_64_gnullvm" -version = "0.53.0" +name = "wit-bindgen-rust-macro" +version = "0.51.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57" +checksum = "0c0f9bfd77e6a48eccf51359e3ae77140a7f50b1e2ebfe62422d8afdaffab17a" +dependencies = [ + "anyhow", + "prettyplease", + "proc-macro2", + "quote", + "syn", + "wit-bindgen-core", + "wit-bindgen-rust", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.52.6" +name = "wit-component" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec" +checksum = "9d66ea20e9553b30172b5e831994e35fbde2d165325bec84fc43dbf6f4eb9cb2" +dependencies = [ + "anyhow", + "bitflags", + "indexmap 2.13.0", + "log", + "serde", + "serde_derive", + "serde_json", + "wasm-encoder", + "wasm-metadata", + "wasmparser 0.244.0", + "wit-parser", +] [[package]] -name = "windows_x86_64_msvc" -version = "0.53.0" +name = "wit-parser" +version = "0.244.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486" +checksum = "ecc8ac4bc1dc3381b7f59c34f00b67e18f910c2c0f50015669dde7def656a736" +dependencies = [ + "anyhow", + "id-arena", + "indexmap 2.13.0", + "log", + "semver", + "serde", + "serde_derive", + "serde_json", + "unicode-xid", + "wasmparser 0.244.0", +] [[package]] -name = "xml-rs" -version = "0.8.27" +name = "xml" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6fd8403733700263c6eb89f192880191f1b83e332f7a20371ddcf421c4a337c7" +checksum = "b8aa498d22c9bbaf482329839bc5620c46be275a19a812e9a22a2b07529a642a" [[package]] name = "yaml-rust" @@ -1020,20 +1130,26 @@ dependencies = [ [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "db6d35d663eadb6c932438e763b262fe1a70987f9ae936e60158176d710cae4a" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "4122cd3169e94605190e77839c9a40d40ed048d305bfdc146e7df40ab0f3e517" dependencies = [ "proc-macro2", "quote", "syn", ] + +[[package]] +name = "zmij" +version = "1.0.21" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8848ee67ecc8aedbaf3e4122217aff892639231befc6a1b58d29fff4c2cabaa" diff --git a/ci/docker/hexagon-unknown-linux-musl/Dockerfile b/ci/docker/hexagon-unknown-linux-musl/Dockerfile new file mode 100644 index 0000000000..f6c0efd946 --- /dev/null +++ b/ci/docker/hexagon-unknown-linux-musl/Dockerfile @@ -0,0 +1,46 @@ +FROM ubuntu:25.10 + +RUN apt-get update && apt-get install -y --no-install-recommends \ + gcc \ + libc6-dev \ + ca-certificates \ + curl \ + zstd \ + file \ + make \ + libc++1 \ 
+ libglib2.0-0t64 \ + libunwind-20 \ + liburing2 \ + llvm + +# The Hexagon toolchain requires libc++ and libunwind at runtime - create symlinks from versioned files +RUN cd /usr/lib/x86_64-linux-gnu && \ + for f in libc++.so.1.0.*; do ln -sf "$f" libc++.so.1; done && \ + for f in libc++abi.so.1.0.*; do ln -sf "$f" libc++abi.so.1; done && \ + for f in libunwind.so.1.0.*; do ln -sf "$f" libunwind.so.1; done + +# Download and install the Hexagon cross toolchain from +# https://github.com/quic/toolchain_for_hexagon/releases/tag/v21.1.8 +# Includes clang cross-compiler, musl sysroot, and qemu-hexagon. +# +# The tarball contains directories with restrictive (0700) permissions. +# In rootless Podman, chmod fails on tar-extracted files within the same +# layer due to overlayfs limitations in user namespaces. Splitting into +# two RUN steps lets chmod work via overlayfs copy-up from the lower layer. +RUN curl -L -o /tmp/hexagon-toolchain.tar.zst \ + https://artifacts.codelinaro.org/artifactory/codelinaro-toolchain-for-hexagon/21.1.8/clang+llvm-21.1.8-cross-hexagon-unknown-linux-musl.tar.zst && \ + mkdir -p /opt/hexagon-toolchain && \ + cd /opt/hexagon-toolchain && \ + (unzstd -c /tmp/hexagon-toolchain.tar.zst | tar -xf - --strip-components=2 --no-same-permissions || true) && \ + rm /tmp/hexagon-toolchain.tar.zst +RUN find /opt/hexagon-toolchain -type d -exec chmod a+rx {} + 2>/dev/null; \ + find /opt/hexagon-toolchain -type f -exec chmod a+r {} + 2>/dev/null; \ + find /opt/hexagon-toolchain -type f -perm /111 -exec chmod a+rx {} + 2>/dev/null; \ + /opt/hexagon-toolchain/bin/hexagon-unknown-linux-musl-clang --version + +ENV PATH="/opt/hexagon-toolchain/bin:${PATH}" \ + CARGO_TARGET_HEXAGON_UNKNOWN_LINUX_MUSL_LINKER=hexagon-unknown-linux-musl-clang \ + CARGO_TARGET_HEXAGON_UNKNOWN_LINUX_MUSL_RUNNER="qemu-hexagon -L /opt/hexagon-toolchain/target/hexagon-unknown-linux-musl" \ + CARGO_UNSTABLE_BUILD_STD_FEATURES=llvm-libunwind \ + OBJDUMP=llvm-objdump diff --git a/ci/run.sh 
b/ci/run.sh index 8a0b5fa26f..ea012b42f9 100755 --- a/ci/run.sh +++ b/ci/run.sh @@ -50,6 +50,9 @@ case ${TARGET} in riscv*) export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+zk,+zks,+zbb,+zbc" ;; + hexagon*) + export RUSTFLAGS="${RUSTFLAGS} -Ctarget-feature=+hvxv60,+hvx-length128b" + ;; esac echo "RUSTFLAGS=${RUSTFLAGS}" diff --git a/crates/core_arch/src/core_arch_docs.md b/crates/core_arch/src/core_arch_docs.md index 7075945754..9b52fb2af1 100644 --- a/crates/core_arch/src/core_arch_docs.md +++ b/crates/core_arch/src/core_arch_docs.md @@ -186,6 +186,7 @@ others at: * [`arm`] * [`aarch64`] * [`amdgpu`] +* [`hexagon`] * [`riscv32`] * [`riscv64`] * [`mips`] @@ -203,6 +204,7 @@ others at: [`arm`]: ../../core/arch/arm/index.html [`aarch64`]: ../../core/arch/aarch64/index.html [`amdgpu`]: ../../core/arch/amdgpu/index.html +[`hexagon`]: ../../core/arch/hexagon/index.html [`riscv32`]: ../../core/arch/riscv32/index.html [`riscv64`]: ../../core/arch/riscv64/index.html [`mips`]: ../../core/arch/mips/index.html diff --git a/crates/core_arch/src/hexagon/mod.rs b/crates/core_arch/src/hexagon/mod.rs new file mode 100644 index 0000000000..c370f3da15 --- /dev/null +++ b/crates/core_arch/src/hexagon/mod.rs @@ -0,0 +1,29 @@ +//! Hexagon architecture intrinsics +//! +//! This module contains intrinsics for the Qualcomm Hexagon DSP architecture, +//! including the Hexagon Vector Extensions (HVX). +//! +//! HVX is a wide SIMD architecture designed for high-performance signal processing, +//! machine learning, and image processing workloads. +//! +//! ## Vector Length Modes +//! +//! HVX supports two vector length modes: +//! - 64-byte mode (512-bit vectors): Use the [`v64`] module +//! - 128-byte mode (1024-bit vectors): Use the [`v128`] module +//! +//! Both modules are available unconditionally, but require the appropriate +//! target features to actually use the intrinsics: +//! - For 64-byte mode: `-C target-feature=+hvx-length64b` +//! 
- For 128-byte mode: `-C target-feature=+hvx-length128b` +//! +//! Note that HVX v66 and later default to 128-byte mode, while earlier versions +//! (v60-v65) default to 64-byte mode. + +/// HVX intrinsics for 64-byte vector mode (512-bit vectors) +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub mod v64; + +/// HVX intrinsics for 128-byte vector mode (1024-bit vectors) +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub mod v128; diff --git a/crates/core_arch/src/hexagon/v128.rs b/crates/core_arch/src/hexagon/v128.rs new file mode 100644 index 0000000000..ef7ff4205c --- /dev/null +++ b/crates/core_arch/src/hexagon/v128.rs @@ -0,0 +1,7489 @@ +//! Hexagon HVX 128-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in 128-byte vector mode (1024-bit vectors). +//! +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! In 128-byte mode: +//! - `HvxVector` is 1024 bits (128 bytes) containing 32 x 32-bit values +//! - `HvxVectorPair` is 2048 bits (256 bytes) +//! - `HvxVectorPred` is 1024 bits (128 bytes) for predicate operations +//! +//! To use this module, compile with `-C target-feature=+hvx-length128b`. +//! +//! ## Architecture Versions +//! +//! Different intrinsics require different HVX architecture versions. Use the +//! appropriate target feature to enable the required version: +//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations) +//! - HVX v62: `-C target-feature=+hvxv62` +//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support) +//! - HVX v66: `-C target-feature=+hvxv66` +//! - HVX v68: `-C target-feature=+hvxv68` +//! - HVX v69: `-C target-feature=+hvxv69` +//! - HVX v73: `-C target-feature=+hvxv73` +//! - HVX v79: `-C target-feature=+hvxv79` +//! +//! 
Each version includes all features from previous versions. + +#![allow(non_camel_case_types)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor}; + +// HVX type definitions for 128-byte vector mode +types! { + #![unstable(feature = "stdarch_hexagon", issue = "151523")] + + /// HVX vector type (1024 bits / 128 bytes) + /// + /// This type represents a single HVX vector register containing 32 x 32-bit values. + pub struct HvxVector(32 x i32); + + /// HVX vector pair type (2048 bits / 256 bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair(64 x i32); + + /// HVX vector predicate type (1024 bits / 128 bytes) + /// + /// This type represents a predicate vector used for conditional operations. + /// Each bit corresponds to a lane in the vector. + pub struct HvxVectorPred(32 x i32); +} + +// LLVM intrinsic declarations for 128-byte vector mode +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.V6.extractw.128B"] + fn extractw(_: HvxVector, _: i32) -> i32; + #[link_name = "llvm.hexagon.V6.get.qfext.128B"] + fn get_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.hi.128B"] + fn hi(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lo.128B"] + fn lo(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatb.128B"] + fn lvsplatb(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplath.128B"] + fn lvsplath(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatw.128B"] + fn lvsplatw(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.128B"] + fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.n.128B"] + fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.not.128B"] + fn pred_not(_: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.128B"] + fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.n.128B"] + fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2.128B"] + fn pred_scalar2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2v2.128B"] + fn pred_scalar2v2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.xor.128B"] + fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.set.qfext.128B"] + fn set_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqh.128B"] + fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqw.128B"] + fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.128B"] + fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx.128B"] + fn v6mpyhubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.128B"] + fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx.128B"] + fn v6mpyvubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai.128B"] + fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai.128B"] + fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai.128B"] + fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai.128B"] + fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = 
"llvm.hexagon.V6.vabs.f8.128B"] + fn vabs_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.hf.128B"] + fn vabs_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.sf.128B"] + fn vabs_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.128B"] + fn vabsb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.sat.128B"] + fn vabsb_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffh.128B"] + fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffub.128B"] + fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffuh.128B"] + fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffw.128B"] + fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.128B"] + fn vabsh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.sat.128B"] + fn vabsh_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw.128B"] + fn vabsw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw.sat.128B"] + fn vabsw_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.128B"] + fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.hf.128B"] + fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.128B"] + fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.mix.128B"] + fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.128B"] + fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.mix.128B"] + fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.128B"] + fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vadd.sf.hf.128B"] + fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadd.sf.sf.128B"] + fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.128B"] + fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.dv.128B"] + fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddbnq.128B"] + fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbq.128B"] + fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.128B"] + fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.dv.128B"] + fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddcarrysat.128B"] + fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbh.128B"] + fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbw.128B"] + fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.128B"] + fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.dv.128B"] + fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhnq.128B"] + fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhq.128B"] + fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.128B"] + fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.dv.128B"] + fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.128B"] + fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; 
+ #[link_name = "llvm.hexagon.V6.vaddhw.acc.128B"] + fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.128B"] + fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.acc.128B"] + fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubsat.128B"] + fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddubsat.dv.128B"] + fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddububb.sat.128B"] + fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.128B"] + fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.dv.128B"] + fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.128B"] + fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.acc.128B"] + fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduwsat.128B"] + fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduwsat.dv.128B"] + fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddw.128B"] + fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddw.dv.128B"] + fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddwnq.128B"] + fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwq.128B"] + fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.128B"] + fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vaddwsat.dv.128B"] + fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.valignb.128B"] + fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.valignbi.128B"] + fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vand.128B"] + fn vand(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.128B"] + fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.acc.128B"] + fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.128B"] + fn vandqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.acc.128B"] + fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvnqv.128B"] + fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvqv.128B"] + fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.128B"] + fn vandvrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.acc.128B"] + fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh.128B"] + fn vaslh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh.acc.128B"] + fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslhv.128B"] + fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.128B"] + fn vaslw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.acc.128B"] + fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslwv.128B"] + fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasr.into.128B"] + fn vasr_into(_: HvxVectorPair, _: 
HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vasrh.128B"] + fn vasrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrh.acc.128B"] + fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbrndsat.128B"] + fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbsat.128B"] + fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubrndsat.128B"] + fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubsat.128B"] + fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhv.128B"] + fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubrndsat.128B"] + fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubsat.128B"] + fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhrndsat.128B"] + fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhsat.128B"] + fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat.128B"] + fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubsat.128B"] + fn vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat.128B"] + fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhsat.128B"] + fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.128B"] + fn vasrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.acc.128B"] + fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; 
+ #[link_name = "llvm.hexagon.V6.vasrwh.128B"] + fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhrndsat.128B"] + fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhsat.128B"] + fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhrndsat.128B"] + fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhsat.128B"] + fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwv.128B"] + fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.128B"] + fn vassign(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.fp.128B"] + fn vassign_fp(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassignp.128B"] + fn vassignp(_: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vavgb.128B"] + fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgbrnd.128B"] + fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgh.128B"] + fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavghrnd.128B"] + fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgub.128B"] + fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgubrnd.128B"] + fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguh.128B"] + fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguhrnd.128B"] + fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguw.128B"] + fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguwrnd.128B"] + fn vavguwrnd(_: HvxVector, _: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vavgw.128B"] + fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgwrnd.128B"] + fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0h.128B"] + fn vcl0h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0w.128B"] + fn vcl0w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcombine.128B"] + fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vconv.h.hf.128B"] + fn vconv_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.h.128B"] + fn vconv_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf16.128B"] + fn vconv_hf_qf16(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf32.128B"] + fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.qf32.128B"] + fn vconv_sf_qf32(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.w.128B"] + fn vconv_sf_w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.w.sf.128B"] + fn vconv_w_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.b.128B"] + fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub.128B"] + fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.b.hf.128B"] + fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.h.hf.128B"] + fn vcvt_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.b.128B"] + fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.f8.128B"] + fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.h.128B"] + fn vcvt_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.sf.128B"] + fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vcvt.hf.ub.128B"] + fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.uh.128B"] + fn vcvt_hf_uh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.sf.hf.128B"] + fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.ub.hf.128B"] + fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.uh.hf.128B"] + fn vcvt_uh_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vd0.128B"] + fn vd0() -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdd0.128B"] + fn vdd0() -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdealb.128B"] + fn vdealb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealb4w.128B"] + fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealh.128B"] + fn vdealh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealvdd.128B"] + fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdelta.128B"] + fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.128B"] + fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc.128B"] + fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.128B"] + fn vdmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.acc.128B"] + fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.128B"] + fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc.128B"] + fn vdmpybus_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.128B"] + fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.acc.128B"] + fn 
vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.128B"] + fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc.128B"] + fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.128B"] + fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc.128B"] + fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.128B"] + fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc.128B"] + fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.128B"] + fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc.128B"] + fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.128B"] + fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc.128B"] + fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.128B"] + fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc.128B"] + fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdsaduh.128B"] + fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdsaduh.acc.128B"] + fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.veqb.128B"] + fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.and.128B"] + fn veqb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.or.128B"] + fn 
veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.xor.128B"] + fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.128B"] + fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.and.128B"] + fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.or.128B"] + fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.xor.128B"] + fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.128B"] + fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.and.128B"] + fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.or.128B"] + fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.xor.128B"] + fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.f8.128B"] + fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.hf.128B"] + fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.sf.128B"] + fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.f8.128B"] + fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.hf.128B"] + fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.sf.128B"] + fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.f8.128B"] + fn vfneg_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.hf.128B"] + fn vfneg_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.sf.128B"] + fn vfneg_sf(_: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vgathermh.128B"] + fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhq.128B"] + fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhw.128B"] + fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhwq.128B"] + fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermw.128B"] + fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermwq.128B"] + fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgtb.128B"] + fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.and.128B"] + fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.or.128B"] + fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.xor.128B"] + fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.128B"] + fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.and.128B"] + fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.or.128B"] + fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.xor.128B"] + fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.128B"] + fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.and.128B"] + fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.or.128B"] + fn vgthf_or(_: HvxVector, _: HvxVector, 
_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.xor.128B"] + fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.128B"] + fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.and.128B"] + fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.or.128B"] + fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.xor.128B"] + fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.128B"] + fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.and.128B"] + fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.or.128B"] + fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.xor.128B"] + fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.128B"] + fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.and.128B"] + fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.or.128B"] + fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.xor.128B"] + fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.128B"] + fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.and.128B"] + fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.or.128B"] + fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.xor.128B"] + fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vgtw.128B"] + fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.and.128B"] + fn vgtw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.or.128B"] + fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.xor.128B"] + fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vinsertwr.128B"] + fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignb.128B"] + fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignbi.128B"] + fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrb.128B"] + fn vlsrb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrh.128B"] + fn vlsrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrhv.128B"] + fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrw.128B"] + fn vlsrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrwv.128B"] + fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.128B"] + fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.nm.128B"] + fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracc.128B"] + fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracci.128B"] + fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvbi.128B"] + fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvwh.128B"] + fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vlutvwh.nm.128B"] + fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracc.128B"] + fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracci.128B"] + fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwhi.128B"] + fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmax.hf.128B"] + fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmax.sf.128B"] + fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxb.128B"] + fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxh.128B"] + fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxub.128B"] + fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxuh.128B"] + fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxw.128B"] + fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.hf.128B"] + fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.sf.128B"] + fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminb.128B"] + fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminh.128B"] + fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminub.128B"] + fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminuh.128B"] + fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminw.128B"] + fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpabus.128B"] + fn 
vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabus.acc.128B"] + fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabusv.128B"] + fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.128B"] + fn vmpabuu(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.acc.128B"] + fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuuv.128B"] + fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.128B"] + fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.acc.128B"] + fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.128B"] + fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.acc.128B"] + fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.128B"] + fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc.128B"] + fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.128B"] + fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf.128B"] + fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf.128B"] + fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.128B"] + fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf.128B"] + fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf.128B"] + fn 
vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16.128B"] + fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf.128B"] + fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.128B"] + fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc.128B"] + fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.sf.128B"] + fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpybus.128B"] + fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybus.acc.128B"] + fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.128B"] + fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.acc.128B"] + fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.128B"] + fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.acc.128B"] + fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyewuh.128B"] + fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyewuh.64.128B"] + fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.128B"] + fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.acc.128B"] + fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsat.acc.128B"] + fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsrs.128B"] + fn 
vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhss.128B"] + fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhus.128B"] + fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhus.acc.128B"] + fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.128B"] + fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.acc.128B"] + fn vmpyhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhvsrs.128B"] + fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyieoh.128B"] + fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewh.acc.128B"] + fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.128B"] + fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc.128B"] + fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.128B"] + fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.acc.128B"] + fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.128B"] + fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.acc.128B"] + fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiowh.128B"] + fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.128B"] + fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.acc.128B"] + fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vmpyiwh.128B"] + fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.acc.128B"] + fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.128B"] + fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.acc.128B"] + fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.128B"] + fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc.128B"] + fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.128B"] + fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc.128B"] + fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.sacc.128B"] + fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyub.128B"] + fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyub.acc.128B"] + fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.128B"] + fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.acc.128B"] + fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.128B"] + fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.acc.128B"] + fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhe.128B"] + fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhe.acc.128B"] + fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhv.128B"] + fn 
vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhv.acc.128B"] + fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhvs.128B"] + fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmux.128B"] + fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgb.128B"] + fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgh.128B"] + fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgub.128B"] + fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgw.128B"] + fn vnavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamth.128B"] + fn vnormamth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamtw.128B"] + fn vnormamtw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnot.128B"] + fn vnot(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vor.128B"] + fn vor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeb.128B"] + fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeh.128B"] + fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhb.sat.128B"] + fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhub.sat.128B"] + fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackob.128B"] + fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackoh.128B"] + fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwh.sat.128B"] + fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwuh.sat.128B"] + fn vpackwuh_sat(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpopcounth.128B"] + fn vpopcounth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqb.128B"] + fn vprefixqb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqh.128B"] + fn vprefixqh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqw.128B"] + fn vprefixqw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrdelta.128B"] + fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.128B"] + fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.acc.128B"] + fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusi.128B"] + fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusi.acc.128B"] + fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusv.128B"] + fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusv.acc.128B"] + fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.128B"] + fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.acc.128B"] + fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.128B"] + fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.acc.128B"] + fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubi.128B"] + fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubi.acc.128B"] + fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubv.128B"] + fn vrmpyubv(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubv.acc.128B"] + fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vror.128B"] + fn vror(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrotr.128B"] + fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhb.128B"] + fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhub.128B"] + fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduhub.128B"] + fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduwuh.128B"] + fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwh.128B"] + fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwuh.128B"] + fn vroundwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrsadubi.128B"] + fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrsadubi.acc.128B"] + fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsatdw.128B"] + fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsathub.128B"] + fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatuwuh.128B"] + fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatwh.128B"] + fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsb.128B"] + fn vsb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vscattermh.128B"] + fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermh.add.128B"] + fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + 
#[link_name = "llvm.hexagon.V6.vscattermhq.128B"] + fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.128B"] + fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.add.128B"] + fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhwq.128B"] + fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.128B"] + fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.add.128B"] + fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermwq.128B"] + fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vsh.128B"] + fn vsh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufeh.128B"] + fn vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffb.128B"] + fn vshuffb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffeb.128B"] + fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffh.128B"] + fn vshuffh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffob.128B"] + fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffvdd.128B"] + fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeb.128B"] + fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeh.128B"] + fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoh.128B"] + fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.128B"] + fn vsub_hf(_: HvxVector, _: 
HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.hf.128B"] + fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.128B"] + fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.mix.128B"] + fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.128B"] + fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.mix.128B"] + fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.128B"] + fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.hf.128B"] + fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsub.sf.sf.128B"] + fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.128B"] + fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.dv.128B"] + fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubbnq.128B"] + fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbq.128B"] + fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.128B"] + fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.dv.128B"] + fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubh.128B"] + fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubh.dv.128B"] + fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhnq.128B"] + fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhq.128B"] + fn vsubhq(_: HvxVector, _: HvxVector, 
_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.128B"] + fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.dv.128B"] + fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhw.128B"] + fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububh.128B"] + fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububsat.128B"] + fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsububsat.dv.128B"] + fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubububb.sat.128B"] + fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.128B"] + fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.dv.128B"] + fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuhw.128B"] + fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuwsat.128B"] + fn vsubuwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuwsat.dv.128B"] + fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubw.128B"] + fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubw.dv.128B"] + fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubwnq.128B"] + fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwq.128B"] + fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.128B"] + fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.dv.128B"] + fn 
vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vswap.128B"] + fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.128B"] + fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.acc.128B"] + fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.128B"] + fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.acc.128B"] + fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.128B"] + fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.acc.128B"] + fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackb.128B"] + fn vunpackb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackh.128B"] + fn vunpackh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackob.128B"] + fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackoh.128B"] + fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackub.128B"] + fn vunpackub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackuh.128B"] + fn vunpackuh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vxor.128B"] + fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vzb.128B"] + fn vzb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vzh.128B"] + fn vzh(_: HvxVector) -> HvxVectorPair; +} + +/// `Rd32=vextract(Vu32,Rs32)` +/// +/// Instruction Type: LD +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(extractw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { + extractw(vu, rs) +} + +/// `Vd32=hi(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(hi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { + hi(vss) +} + +/// `Vd32=lo(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lo))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { + lo(vss) +} + +/// `Vd32=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lvsplatw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { + lvsplatw(rt) +} + +/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffh(vu, vv) +} + +/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, 
vv: HvxVector) -> HvxVector { + vabsdiffub(vu, vv) +} + +/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffuh(vu, vv) +} + +/// `Vd32.uw=vabsdiff(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffw(vu, vv) +} + +/// `Vd32.h=vabs(Vu32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { + vabsh(vu) +} + +/// `Vd32.h=vabs(Vu32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { + vabsh_sat(vu) +} + +/// `Vd32.w=vabs(Vu32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { + vabsw(vu) +} + +/// 
`Vd32.w=vabs(Vu32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { + vabsw_sat(vu) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddb(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddb_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddh(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddh_dv(vuu, vvv) +} + +/// 
`Vd32.h=vadd(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddhsat(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddhw(vu, vv) +} + +/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddubh(vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) 
-> HvxVector { + vaddubsat(vu, vv) +} + +/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddubsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduhsat(vu, vv) +} + +/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadduhw(vu, vv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_add(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddw_dv(vuu, vvv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddwsat(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddwsat_dv(vuu, vvv) +} + +/// `Vd32=valign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + valignb(vu, vv, rt) +} + +/// `Vd32=valign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(valignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + valignbi(vu, vv, iu3) +} + +/// `Vd32=vand(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vand))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_and(vu, vv) +} + +/// `Vd32.h=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vaslh(vu, rt) +} + +/// `Vd32.h=vasl(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaslhv(vu, vv) +} + +/// `Vd32.w=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vaslw(vu, rt) +} + +/// `Vx32.w+=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw_acc))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslw_acc(vx, vu, rt) +} + +/// `Vd32.w=vasl(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaslwv(vu, vv) +} + +/// `Vd32.h=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vasrh(vu, rt) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhbrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrhv(vu, vv) +} + +/// `Vd32.w=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vasrw(vu, rt) +} + +/// `Vx32.w+=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrw_acc(vx, vu, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwh(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhrndsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhsat(vu, vv, rt) +} + +/// `Vd32.w=vasr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrwv(vu, vv) +} + +/// `Vd32=Vu32` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassign))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { + vassign(vu) +} + +/// `Vdd32=Vuu32` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassignp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { + vassignp(vuu) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgh(vu, vv) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavghrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavghrnd(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgub(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgubrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgubrnd(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguh(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguhrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguhrnd(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgw(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgwrnd(vu, vv) +} + +/// `Vd32.uh=vcl0(Vu32.uh)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { + vcl0h(vu) +} + +/// `Vd32.uw=vcl0(Vu32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch 
= "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { + vcl0w(vu) +} + +/// `Vdd32=vcombine(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcombine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vcombine(vu, vv) +} + +/// `Vd32=#0` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vzero() -> HvxVector { + vd0() +} + +/// `Vd32.b=vdeal(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { + vdealb(vu) +} + +/// `Vd32.b=vdeale(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb4w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdealb4w(vu, vv) +} + +/// `Vd32.h=vdeal(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealh))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { + vdealh(vu) +} + +/// `Vdd32=vdeal(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vdealvdd(vu, vv, rt) +} + +/// `Vd32=vdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdelta(vu, vv) +} + +/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus(vu, rt) +} + +/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpybus_dv(vuu, rt) +} + +/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpybus_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb_acc(vx, vu, rt) +} + +/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpyhb_dv(vuu, rt) +} + +/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpyhb_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat(vu, vv) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat_acc(vx, vu, vv) +} + +/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdsaduh(vuu, rt) +} + +/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdsaduh_acc(vxx, vuu, rt) +} + +/// `Vx32.w=vinsert(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vinsertwr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { + vinsertwr(vx, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vlalignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlalignb(vu, vv, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlalignbi(vu, vv, iu3) +} + +/// `Vd32.uh=vlsr(Vu32.uh,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrh(vu, rt) +} + +/// `Vd32.h=vlsr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrhv(vu, vv) +} + +/// `Vd32.uw=vlsr(Vu32.uw,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrw(vu, rt) +} + +/// `Vd32.w=vlsr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vlsrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrwv(vu, vv) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbr( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVector { + vlutvvb_oracc(vx, vu, vv, rt) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhr( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVectorPair { + vlutvwh_oracc(vxx, vu, vv, rt) +} + +/// `Vd32.h=vmax(Vu32.h,Vv32.h)` 
+/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxh(vu, vv) +} + +/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxub(vu, vv) +} + +/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxuh(vu, vv) +} + +/// `Vd32.w=vmax(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxw(vu, vv) +} + +/// `Vd32.h=vmin(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminh(vu, vv) +} + +/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA 
+/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminub(vu, vv) +} + +/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminuh(vu, vv) +} + +/// `Vd32.w=vmin(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminw(vu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabus(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabus_acc(vxx, vuu, rt) +} + +/// 
`Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabusv(vuu, vvv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabuuv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabuuv(vuu, vvv) +} + +/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpahb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpahb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus(vu, rt) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus_acc(vxx, vu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybusv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybusv_acc(vxx, vu, vv) +} + +/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvbvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybv_acc(vxx, vu, vv) +} + +/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyewuh(vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh(vu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyhsat_acc(vxx, vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhsrs(vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhss))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhss(vu, rt) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhus(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhus_acc(vxx, vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhv(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhv_acc(vxx, vu, vv) +} + +/// 
`Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhvsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyhvsrs(vu, vv) +} + +/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyieoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyieoh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewh_acc(vx, vu, vv) +} + +/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: 
HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih(vu, vv) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb(vu, rt) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiowh))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiowh(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh(vu, vv) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh_rnd(vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_rnd_sacc(vx, vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_sacc(vx, vu, vv) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyub(vu, rt) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyub_acc(vxx, vu, rt) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyubv(vu, vv) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyubv_acc(vxx, vu, vv) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyuh(vu, rt) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( 
+ vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyuh_acc(vxx, vu, rt) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyuhv(vu, vv) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyuhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vnavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgh(vu, vv) +} + +/// `Vd32.b=vnavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgub(vu, vv) +} + +/// `Vd32.w=vnavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgw))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgw(vu, vv) +} + +/// `Vd32.h=vnormamt(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { + vnormamth(vu) +} + +/// `Vd32.w=vnormamt(Vu32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamtw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { + vnormamtw(vu) +} + +/// `Vd32=vnot(Vu32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnot))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { + vnot(vu) +} + +/// `Vd32=vor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_or(vu, vv) +} + +/// `Vd32.b=vpacke(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: 
HvxVector) -> HvxVector { + vpackeb(vu, vv) +} + +/// `Vd32.h=vpacke(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeh(vu, vv) +} + +/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhb_sat(vu, vv) +} + +/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhub_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhub_sat(vu, vv) +} + +/// `Vd32.b=vpacko(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackob(vu, vv) +} + +/// `Vd32.h=vpacko(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackoh(vu, vv) +} + +/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwh_sat(vu, vv) +} + +/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwuh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwuh_sat(vu, vv) +} + +/// `Vd32.h=vpopcount(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpopcounth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { + vpopcounth(vu) +} + +/// `Vd32=vrdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrdelta(vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus(vu, rt) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpybusi(vuu, rt, iu1) +} + +/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpybusi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv_acc(vx, vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv_acc(vx, vu, vv) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub(vu, rt) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub_acc(vx, vu, rt) +} + +/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: 
SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpyubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpyubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv(vu, vv) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv_acc(vx, vu, vv) +} + +/// `Vd32=vror(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vror))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { + 
vror(vu, rt) +} + +/// `Vd32.b=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhb(vu, vv) +} + +/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhub(vu, vv) +} + +/// `Vd32.h=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwh(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwuh(vu, vv) +} + +/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrsadubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrsadubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.ub=vsat(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsathub(vu, vv) +} + +/// `Vd32.h=vsat(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatwh(vu, vv) +} + +/// `Vdd32.h=vsxt(Vu32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { + vsb(vu) +} + +/// `Vdd32.w=vsxt(Vu32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsh))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { + vsh(vu) +} + +/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufeh(vu, vv) +} + +/// `Vd32.b=vshuff(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { + vshuffb(vu) +} + +/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffeb(vu, vv) +} + +/// `Vd32.h=vshuff(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { + vshuffh(vu) +} + +/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffo_vbvb(vu: 
HvxVector, vv: HvxVector) -> HvxVector { + vshuffob(vu, vv) +} + +/// `Vdd32=vshuff(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vshuffvdd(vu, vv, rt) +} + +/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeb(vu, vv) +} + +/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeh(vu, vv) +} + +/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufoh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] 
+pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubb(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubb_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubh(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubh_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubhsat(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat_dv))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubhw(vu, vv) +} + +/// `Vdd32.h=vsub(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsububh(vu, vv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsububsat(vu, vv) +} + +/// `Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsububsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuhsat(vu, vv) +} + +/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubuhw(vu, vv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_sub(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubw_dv(vuu, vvv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubwsat(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubwsat_dv(vuu, vvv) +} + +/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyb(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwbrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpybus(vuu, rt) +} + +/// 
`Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpybus_acc(vxx, vuu, rt) +} + +/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyhb(vuu, rt) +} + +/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyhb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vunpack(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { + vunpackb(vu) +} + +/// `Vdd32.w=vunpack(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { + vunpackh(vu) +} + +/// `Vxx32.h|=vunpacko(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackob(vxx, vu) +} + +/// `Vxx32.w|=vunpacko(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackoh(vxx, vu) +} + +/// `Vdd32.uh=vunpack(Vu32.ub)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { + vunpackub(vu) +} + +/// `Vdd32.uw=vunpack(Vu32.uh)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { + vunpackuh(vu) +} + +/// `Vd32=vxor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vxor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vxor_vv(vu: 
HvxVector, vv: HvxVector) -> HvxVector { + simd_xor(vu, vv) +} + +/// `Vdd32.uh=vzxt(Vu32.ub)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { + vzb(vu) +} + +/// `Vdd32.uw=vzxt(Vu32.uh)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { + vzh(vu) +} + +/// `Vd32.b=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { + lvsplatb(rt) +} + +/// `Vd32.h=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplath))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { + lvsplath(rt) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddbsat(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction 
Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddbsat_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbh(vu, vv) +} + +/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbw(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddhw_acc(vxx, vu, vv) +} + +/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddubh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vaddacc_whvubvub( + vxx: HvxVectorPair, + vu: 
HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddubh_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddububb_sat(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vadduhw_acc(vxx, vu, vv) +} + +/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduwsat(vu, vv) +} + +/// `Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = 
"hvxv62"))] +#[cfg_attr(test, assert_instr(vasrhbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasruwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhrndsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vlsr(Vu32.ub,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlsrb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrb(vu, rt) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb_nm(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// 
Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbi( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVector { + vlutvvb_oracci(vx, vu, vv, iu3) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlutvvbi(vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh_nm(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhi( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVectorPair { + vlutvwh_oracci(vxx, vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, 
assert_instr(vlutvwhi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { + vlutvwhi(vu, vv, iu3) +} + +/// `Vd32.b=vmax(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmaxb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxb(vu, vv) +} + +/// `Vd32.b=vmin(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vminb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminb(vu, vv) +} + +/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpauhb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwuhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpauhb_acc(vxx, vuu, rt) +} + +/// `Vdd32=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyewuh_64))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyewuh_64(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub_acc(vx, vu, rt) +} + +/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyowh_64_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpyoacc_wvwvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyowh_64_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduhub(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction 
Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduwuh(vu, vv) +} + +/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsatuwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatuwuh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubbsat(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubbsat_dv(vuu, vvv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubububb_sat(vu, vv) 
+} + +/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuwsat(vu, vv) +} + +/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vabs(Vu32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { + vabsb(vu) +} + +/// `Vd32.b=vabs(Vu32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { + vabsb_sat(vu) +} + +/// `Vx32.h+=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vaslh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslh_acc(vx, vu, 
rt) +} + +/// `Vx32.h+=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasrh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrh_acc(vx, vu, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhsat(vu, vv, rt) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgb))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgb(vu, vv) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgbrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgbrnd(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguw(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguwrnd(vu, vv) +} + +/// `Vdd32=#0` +/// +/// Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vdd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vzero() -> HvxVectorPair { + vdd0() +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe 
fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermh(rs, rt, mu, vv) +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { + vgathermhw(rs, rt, mu, vvv) +} + +/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermw(rs, rt, mu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabuu(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrub( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabuu_acc(vxx, vuu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh_acc(vxx, vu, rt) +} + +/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe(vu, rt) +} + +/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe_acc(vx, vu, rt) +} + +/// `Vd32.b=vnavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vnavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgb(vu, vv) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution 
Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh_add(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw_add(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: 
i32, vv: HvxVector, vw: HvxVector) { + vscattermw_add(rt, mu, vv, vw) +} + +/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vasr_into))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vasrinto_wwvwvw( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vasr_into(vxx, vu, vv) +} + +/// `Vd32.uw=vrotr(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vrotr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrotr(vu, vv) +} + +/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vsatdw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatdw(vu, vv) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_h( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(v6mpyhubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_v( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vd32.hf=vabs(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { + vabs_hf(vu) +} + +/// `Vd32.sf=vabs(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { + vabs_sf(vu) +} + +/// 
`Vd32.qf16=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf(vu, vv) +} + +/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: 
HvxVector) -> HvxVector { + vadd_qf32(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf(vu, vv) +} + +/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadd_sf_hf(vu, vv) +} + +/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf_sf(vu, vv) +} + +/// `Vd32.w=vfmv(Vu32.w)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vassign_fp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { + vassign_fp(vu) +} + +/// `Vd32.hf=Vu32.qf16` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { + vconv_hf_qf16(vu) +} + +/// `Vd32.hf=Vuu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { + vconv_hf_qf32(vuu) +} + +/// `Vd32.sf=Vu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_sf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { + vconv_sf_qf32(vu) +} + +/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_b_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_b_hf(vu, vv) +} + +/// `Vd32.h=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_h_hf(vu) +} + +/// 
`Vdd32.hf=vcvt(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_b(vu) +} + +/// `Vd32.hf=vcvt(Vu32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { + vcvt_hf_h(vu) +} + +/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_hf_sf(vu, vv) +} + +/// `Vdd32.hf=vcvt(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_ub(vu) +} + +/// `Vd32.hf=vcvt(Vu32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_uh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { + vcvt_hf_uh(vu) +} + +/// `Vdd32.sf=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { + vcvt_sf_hf(vu) +} + +/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_ub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_ub_hf(vu, vv) +} + +/// `Vd32.uh=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_uh_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_uh_hf(vu) +} + +/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf(vu, vv) +} + +/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf_acc(vx, vu, vv) +} + +/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_hf(vu, vv) +} + +/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_sf(vu, vv) +} + +/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_hf(vu, vv) +} + +/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_sf(vu, vv) +} + +/// `Vd32.hf=vfneg(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { + vfneg_hf(vu) +} + +/// `Vd32.sf=vfneg(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { + vfneg_sf(vu) +} + +/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_hf(vu, vv) +} + +/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_sf(vu, vv) +} + +/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_hf(vu, vv) +} + +/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_sf(vu, vv) +} + +/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf(vu, vv) +} + +/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf_acc(vx, vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_hf(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_mix_hf(vu, vv) +} + +/// 
`Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_mix_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_qf16(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] 
+pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32_sf(vu, vv) +} + +/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_sf_hf(vu, vv) +} + +/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpy_sf_hf_acc(vxx, vu, vv) +} + +/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_sf_sf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf(vu, vv) +} + +/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
+#[cfg_attr(test, assert_instr(vsub_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf(vu, vv) +} + +/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsub_sf_hf(vu, vv) +} + +/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf_sf(vu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubrndsat(vuu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubsat(vuu, vv) +} + +/// 
`Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhrndsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhsat(vuu, vv) +} + +/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vmpyuhvs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyuhvs(vu, vv) +} + +/// `Vd32.h=Vu32.hf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { + vconv_h_hf(vu) +} + +/// `Vd32.hf=Vu32.h` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { + vconv_hf_h(vu) +} + 
+/// `Vd32.sf=Vu32.w` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_sf_w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { + vconv_sf_w(vu) +} + +/// `Vd32.w=Vu32.sf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_w_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { + vconv_w_sf(vu) +} + +/// `Vd32=vgetqfext(Vu32.x,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(get_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + get_qfext(vu, rt) +} + +/// `Vd32.x=vsetqfext(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(set_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + set_qfext(vu, rt) +} + +/// `Vd32.f8=vabs(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vabs_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { + vabs_f8(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_b(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_ub(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt_hf_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_f8(vu) +} + +/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmax_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_f8(vu, vv) +} + +/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmin_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_f8(vu, vv) +} + +/// `Vd32.f8=vfneg(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv79"))]
+#[cfg_attr(test, assert_instr(vfneg_f8))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector {
+    vfneg_f8(vu)
+}
+
+/// `Qd4=and(Qs4,Qt4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=and(Qs4,!Qt4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_and_n(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=not(Qs4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_not(vandvrt(
+            core::mem::transmute::<HvxVectorPred, HvxVector>(qs),
+            -1,
+        )),
+        -1,
+    ))
+}
+
+/// `Qd4=or(Qs4,Qt4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=or(Qs4,!Qt4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_or_n(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=vsetq(Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VP
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(pred_scalar2(rt), -1))
+}
+
+/// `Qd4=xor(Qs4,Qt4)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA_DV
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        pred_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qs), -1),
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qt), -1),
+        ),
+        -1,
+    ))
+}
+
+/// `if (!Qv4) vmem(Rt32+#s4)=Vs32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VM_ST
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
+    vS32b_nqpred_ai(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        rt,
+        vs,
+    )
+}
+
+/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VM_ST
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
+    vS32b_nt_nqpred_ai(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        rt,
+        vs,
+    )
+}
+
+/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VM_ST
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
+    vS32b_nt_qpred_ai(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        rt,
+        vs,
+    )
+}
+
+/// `if (Qv4) vmem(Rt32+#s4)=Vs32`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VM_ST
+/// Execution Slots: SLOT0
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) {
+    vS32b_qpred_ai(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        rt,
+        vs,
+    )
+}
+
+/// `if (!Qv4) Vx32.b+=Vu32.b`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddbnq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `if (Qv4) Vx32.b+=Vu32.b`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddbq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `if (!Qv4) Vx32.h+=Vu32.h`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddhnq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `if (Qv4) Vx32.h+=Vu32.h`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddhq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `if (!Qv4) Vx32.w+=Vu32.w`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddwnq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `if (Qv4) Vx32.w+=Vu32.w`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector {
+    vaddwq(
+        vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qv), -1),
+        vx,
+        vu,
+    )
+}
+
+/// `Vd32=vand(Qu4,Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector {
+    vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
+}
+
+/// `Vx32|=vand(Qu4,Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector {
+    vandvrt_acc(vx, core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)
+}
+
+/// `Qd4=vand(Vu32,Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vu, rt))
+}
+
+/// `Qx4|=vand(Vu32,Rt32)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VX_LATE
+/// Execution Slots: SLOT23
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt_acc(
+        core::mem::transmute::<HvxVectorPred, HvxVector>(qx),
+        vu,
+        rt,
+    ))
+}
+
+/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqb(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqand_qvbvb(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqb_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqor_qvbvb(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqb_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqxacc_qvbvb(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqb_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqh(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqand_qvhvh(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqh_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqor_qvhvh(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqh_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqxacc_qvhvh(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqh_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(veqw(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqand_qvwvw(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqw_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqor_qvwvw(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqw_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_eqxacc_qvwvw(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        veqw_xor(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vgtb(vu, vv), -1))
+}
+
+/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtand_qvbvb(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgtb_and(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA
+/// Execution Slots: SLOT0123
+#[inline(always)]
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))]
+#[unstable(feature = "stdarch_hexagon", issue = "151523")]
+pub unsafe fn q6_q_vcmp_gtor_qvbvb(
+    qx: HvxVectorPred,
+    vu: HvxVector,
+    vv: HvxVector,
+) -> HvxVectorPred {
+    core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(
+        vgtb_or(
+            vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), -1),
+            vu,
+            vv,
+        ),
+        -1,
+    ))
+}
+
+/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)`
+///
+/// This is a compound operation composed of multiple HVX instructions.
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Vd32=vmux(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmux( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `if (!Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vdd32=vswap(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vswap( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `Qd4=vsetq2(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2v2(rt), -1)) +} + +/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqh( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqw( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Vd32=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt( + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vx32|=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt_acc( + vx, + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vd32=vand(!Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvnqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `Vd32=vand(Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvh( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermhq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmww( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, +) { + vgathermhwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + ) +} + +/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvw( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `Vd32.b=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqb(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `Vd32.h=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqh(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `Vd32.w=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqw(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmvhv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, + vw: HvxVector, +) { + vscattermhq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + vw, + ) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmwwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, + vw: HvxVector, +) { + vscattermhwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + vw, + ) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmvwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, + vw: HvxVector, +) { + vscattermwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + vw, + ) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( + vu: HvxVector, + vv: HvxVector, + qs: HvxVectorPred, +) -> HvxVector { + vaddcarrysat( + vu, + vv, + vandvrt(core::mem::transmute::(qs), -1), + ) +} + +/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgthf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtsf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} diff --git a/crates/core_arch/src/hexagon/v64.rs b/crates/core_arch/src/hexagon/v64.rs new file mode 100644 index 0000000000..023a8711d2 --- /dev/null +++ b/crates/core_arch/src/hexagon/v64.rs @@ -0,0 +1,7489 @@ +//! Hexagon HVX 64-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in 64-byte vector mode (512-bit vectors). +//! +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! In 64-byte mode: +//! - `HvxVector` is 512 bits (64 bytes) containing 16 x 32-bit values +//! - `HvxVectorPair` is 1024 bits (128 bytes) +//! - `HvxVectorPred` is 512 bits (64 bytes) for predicate operations +//! +//! To use this module, compile with `-C target-feature=+hvx-length64b`. +//! +//! ## Architecture Versions +//! +//! 
Different intrinsics require different HVX architecture versions. Use the
//! appropriate target feature to enable the required version:
//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations)
//! - HVX v62: `-C target-feature=+hvxv62`
//! - HVX v65: `-C target-feature=+hvxv65` (adds scatter/gather and prefix-sum)
//! - HVX v66: `-C target-feature=+hvxv66`
//! - HVX v68: `-C target-feature=+hvxv68` (adds hf/sf floating-point operations)
//! - HVX v69: `-C target-feature=+hvxv69`
//! - HVX v73: `-C target-feature=+hvxv73`
//! - HVX v79: `-C target-feature=+hvxv79`
//!
//! Each version includes all features from previous versions.

#![allow(non_camel_case_types)]

#[cfg(test)]
use stdarch_test::assert_instr;

use crate::intrinsics::simd::{simd_add, simd_and, simd_or, simd_sub, simd_xor};

// HVX type definitions for 64-byte vector mode
types! {
    #![unstable(feature = "stdarch_hexagon", issue = "151523")]

    /// HVX vector type (512 bits / 64 bytes)
    ///
    /// This type represents a single HVX vector register containing 16 x 32-bit values.
    pub struct HvxVector(16 x i32);

    /// HVX vector pair type (1024 bits / 128 bytes)
    ///
    /// This type represents a pair of HVX vector registers, often used for
    /// operations that produce double-width results.
    pub struct HvxVectorPair(32 x i32);

    /// HVX vector predicate type (512 bits / 64 bytes)
    ///
    /// This type represents a predicate vector used for conditional operations.
    /// Each bit corresponds to a lane in the vector.
+ pub struct HvxVectorPred(16 x i32); +} + +// LLVM intrinsic declarations for 64-byte vector mode +#[allow(improper_ctypes)] +unsafe extern "unadjusted" { + #[link_name = "llvm.hexagon.V6.extractw"] + fn extractw(_: HvxVector, _: i32) -> i32; + #[link_name = "llvm.hexagon.V6.get.qfext"] + fn get_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.hi"] + fn hi(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lo"] + fn lo(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatb"] + fn lvsplatb(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplath"] + fn lvsplath(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.lvsplatw"] + fn lvsplatw(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and"] + fn pred_and(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.and.n"] + fn pred_and_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.not"] + fn pred_not(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or"] + fn pred_or(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.or.n"] + fn pred_or_n(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2"] + fn pred_scalar2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.scalar2v2"] + fn pred_scalar2v2(_: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.pred.xor"] + fn pred_xor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.set.qfext"] + fn set_qfext(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqh"] + fn shuffeqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.shuffeqw"] + fn shuffeqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10"] + fn v6mpyhubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyhubs10.vxx"] + fn 
v6mpyhubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10"] + fn v6mpyvubs10(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.v6mpyvubs10.vxx"] + fn v6mpyvubs10_vxx( + _: HvxVectorPair, + _: HvxVectorPair, + _: HvxVectorPair, + _: i32, + ) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vS32b.nqpred.ai"] + fn vS32b_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.nqpred.ai"] + fn vS32b_nt_nqpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.nt.qpred.ai"] + fn vS32b_nt_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vS32b.qpred.ai"] + fn vS32b_qpred_ai(_: HvxVector, _: *mut HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vabs.f8"] + fn vabs_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.hf"] + fn vabs_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabs.sf"] + fn vabs_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb"] + fn vabsb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsb.sat"] + fn vabsb_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffh"] + fn vabsdiffh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffub"] + fn vabsdiffub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffuh"] + fn vabsdiffuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsdiffw"] + fn vabsdiffw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh"] + fn vabsh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsh.sat"] + fn vabsh_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vabsw"] + fn vabsw(_: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vabsw.sat"] + fn vabsw_sat(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf"] + fn vadd_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.hf.hf"] + fn vadd_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16"] + fn vadd_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf16.mix"] + fn vadd_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32"] + fn vadd_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.qf32.mix"] + fn vadd_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf"] + fn vadd_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadd.sf.hf"] + fn vadd_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadd.sf.sf"] + fn vadd_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb"] + fn vaddb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddb.dv"] + fn vaddb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddbnq"] + fn vaddbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbq"] + fn vaddbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat"] + fn vaddbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddbsat.dv"] + fn vaddbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddcarrysat"] + fn vaddcarrysat(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbh"] + fn vaddclbh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddclbw"] + fn vaddclbw(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vaddh"] + fn vaddh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddh.dv"] + fn vaddh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhnq"] + fn vaddhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhq"] + fn vaddhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat"] + fn vaddhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddhsat.dv"] + fn vaddhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw"] + fn vaddhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddhw.acc"] + fn vaddhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh"] + fn vaddubh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubh.acc"] + fn vaddubh_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddubsat"] + fn vaddubsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddubsat.dv"] + fn vaddubsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddububb.sat"] + fn vaddububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat"] + fn vadduhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vadduhsat.dv"] + fn vadduhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw"] + fn vadduhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduhw.acc"] + fn vadduhw_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vadduwsat"] + fn vadduwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name 
= "llvm.hexagon.V6.vadduwsat.dv"] + fn vadduwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddw"] + fn vaddw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddw.dv"] + fn vaddw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vaddwnq"] + fn vaddwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwq"] + fn vaddwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat"] + fn vaddwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaddwsat.dv"] + fn vaddwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.valignb"] + fn valignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.valignbi"] + fn valignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vand"] + fn vand(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt"] + fn vandnqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandnqrt.acc"] + fn vandnqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt"] + fn vandqrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandqrt.acc"] + fn vandqrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvnqv"] + fn vandvnqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvqv"] + fn vandvqv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt"] + fn vandvrt(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vandvrt.acc"] + fn vandvrt_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslh"] + fn vaslh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = 
"llvm.hexagon.V6.vaslh.acc"] + fn vaslh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslhv"] + fn vaslhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw"] + fn vaslw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslw.acc"] + fn vaslw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vaslwv"] + fn vaslwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasr.into"] + fn vasr_into(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vasrh"] + fn vasrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrh.acc"] + fn vasrh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbrndsat"] + fn vasrhbrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhbsat"] + fn vasrhbsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubrndsat"] + fn vasrhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhubsat"] + fn vasrhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrhv"] + fn vasrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubrndsat"] + fn vasruhubrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruhubsat"] + fn vasruhubsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhrndsat"] + fn vasruwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasruwuhsat"] + fn vasruwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubrndsat"] + fn vasrvuhubrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvuhubsat"] + fn 
vasrvuhubsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhrndsat"] + fn vasrvwuhrndsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrvwuhsat"] + fn vasrvwuhsat(_: HvxVectorPair, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw"] + fn vasrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrw.acc"] + fn vasrw_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwh"] + fn vasrwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhrndsat"] + fn vasrwhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwhsat"] + fn vasrwhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhrndsat"] + fn vasrwuhrndsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwuhsat"] + fn vasrwuhsat(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vasrwv"] + fn vasrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign"] + fn vassign(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassign.fp"] + fn vassign_fp(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vassignp"] + fn vassignp(_: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vavgb"] + fn vavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgbrnd"] + fn vavgbrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgh"] + fn vavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavghrnd"] + fn vavghrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgub"] + fn vavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgubrnd"] + fn vavgubrnd(_: HvxVector, _: HvxVector) -> HvxVector; + 
#[link_name = "llvm.hexagon.V6.vavguh"] + fn vavguh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguhrnd"] + fn vavguhrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguw"] + fn vavguw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavguwrnd"] + fn vavguwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgw"] + fn vavgw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vavgwrnd"] + fn vavgwrnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0h"] + fn vcl0h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcl0w"] + fn vcl0w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcombine"] + fn vcombine(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vconv.h.hf"] + fn vconv_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.h"] + fn vconv_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf16"] + fn vconv_hf_qf16(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.hf.qf32"] + fn vconv_hf_qf32(_: HvxVectorPair) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.qf32"] + fn vconv_sf_qf32(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.sf.w"] + fn vconv_sf_w(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vconv.w.sf"] + fn vconv_w_sf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.b"] + fn vcvt2_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt2.hf.ub"] + fn vcvt2_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.b.hf"] + fn vcvt_b_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.h.hf"] + fn vcvt_h_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.b"] + fn vcvt_hf_b(_: HvxVector) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vcvt.hf.f8"] + fn vcvt_hf_f8(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.h"] + fn vcvt_hf_h(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.sf"] + fn vcvt_hf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.hf.ub"] + fn vcvt_hf_ub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.hf.uh"] + fn vcvt_hf_uh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.sf.hf"] + fn vcvt_sf_hf(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vcvt.ub.hf"] + fn vcvt_ub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vcvt.uh.hf"] + fn vcvt_uh_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vd0"] + fn vd0() -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdd0"] + fn vdd0() -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdealb"] + fn vdealb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealb4w"] + fn vdealb4w(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealh"] + fn vdealh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdealvdd"] + fn vdealvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdelta"] + fn vdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf"] + fn vdmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpy.sf.hf.acc"] + fn vdmpy_sf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus"] + fn vdmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.acc"] + fn vdmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv"] + fn vdmpybus_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpybus.dv.acc"] + fn vdmpybus_dv_acc(_: HvxVectorPair, _: 
HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb"] + fn vdmpyhb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.acc"] + fn vdmpyhb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv"] + fn vdmpyhb_dv(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhb.dv.acc"] + fn vdmpyhb_dv_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdmpyhisat"] + fn vdmpyhisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhisat.acc"] + fn vdmpyhisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat"] + fn vdmpyhsat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsat.acc"] + fn vdmpyhsat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat"] + fn vdmpyhsuisat(_: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsuisat.acc"] + fn vdmpyhsuisat_acc(_: HvxVector, _: HvxVectorPair, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat"] + fn vdmpyhsusat(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhsusat.acc"] + fn vdmpyhsusat_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat"] + fn vdmpyhvsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdmpyhvsat.acc"] + fn vdmpyhvsat_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vdsaduh"] + fn vdsaduh(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vdsaduh.acc"] + fn vdsaduh_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.veqb"] + fn veqb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.and"] + fn veqb_and(_: 
HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.or"] + fn veqb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqb.xor"] + fn veqb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh"] + fn veqh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.and"] + fn veqh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.or"] + fn veqh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqh.xor"] + fn veqh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw"] + fn veqw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.and"] + fn veqw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.or"] + fn veqw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.veqw.xor"] + fn veqw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.f8"] + fn vfmax_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.hf"] + fn vfmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmax.sf"] + fn vfmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.f8"] + fn vfmin_f8(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.hf"] + fn vfmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfmin.sf"] + fn vfmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.f8"] + fn vfneg_f8(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.hf"] + fn vfneg_hf(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vfneg.sf"] + fn vfneg_sf(_: HvxVector) -> 
HvxVector; + #[link_name = "llvm.hexagon.V6.vgathermh"] + fn vgathermh(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhq"] + fn vgathermhq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhw"] + fn vgathermhw(_: *mut HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermhwq"] + fn vgathermhwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVectorPair) -> (); + #[link_name = "llvm.hexagon.V6.vgathermw"] + fn vgathermw(_: *mut HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgathermwq"] + fn vgathermwq(_: *mut HvxVector, _: HvxVector, _: i32, _: i32, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vgtb"] + fn vgtb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.and"] + fn vgtb_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.or"] + fn vgtb_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtb.xor"] + fn vgtb_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth"] + fn vgth(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.and"] + fn vgth_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.or"] + fn vgth_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgth.xor"] + fn vgth_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf"] + fn vgthf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.and"] + fn vgthf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.or"] + fn vgthf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgthf.xor"] 
+ fn vgthf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf"] + fn vgtsf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.and"] + fn vgtsf_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.or"] + fn vgtsf_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtsf.xor"] + fn vgtsf_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub"] + fn vgtub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.and"] + fn vgtub_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.or"] + fn vgtub_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtub.xor"] + fn vgtub_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh"] + fn vgtuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.and"] + fn vgtuh_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.or"] + fn vgtuh_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuh.xor"] + fn vgtuh_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw"] + fn vgtuw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.and"] + fn vgtuw_and(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.or"] + fn vgtuw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtuw.xor"] + fn vgtuw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw"] + fn vgtw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.and"] + fn vgtw_and(_: HvxVector, _: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.or"] + fn vgtw_or(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vgtw.xor"] + fn vgtw_xor(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vinsertwr"] + fn vinsertwr(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignb"] + fn vlalignb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlalignbi"] + fn vlalignbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrb"] + fn vlsrb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrh"] + fn vlsrh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrhv"] + fn vlsrhv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrw"] + fn vlsrw(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlsrwv"] + fn vlsrwv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb"] + fn vlutvvb(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.nm"] + fn vlutvvb_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracc"] + fn vlutvvb_oracc(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvb.oracci"] + fn vlutvvb_oracci(_: HvxVector, _: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvvbi"] + fn vlutvvbi(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vlutvwh"] + fn vlutvwh(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.nm"] + fn vlutvwh_nm(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwh.oracc"] + fn vlutvwh_oracc(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vlutvwh.oracci"] + fn vlutvwh_oracci(_: HvxVectorPair, _: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vlutvwhi"] + fn vlutvwhi(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmax.hf"] + fn vmax_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmax.sf"] + fn vmax_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxb"] + fn vmaxb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxh"] + fn vmaxh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxub"] + fn vmaxub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxuh"] + fn vmaxuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmaxw"] + fn vmaxw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.hf"] + fn vmin_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmin.sf"] + fn vmin_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminb"] + fn vminb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminh"] + fn vminh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminub"] + fn vminub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminuh"] + fn vminuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vminw"] + fn vminw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpabus"] + fn vmpabus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabus.acc"] + fn vmpabus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabusv"] + fn vmpabusv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu"] + fn vmpabuu(_: 
HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuu.acc"] + fn vmpabuu_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpabuuv"] + fn vmpabuuv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb"] + fn vmpahb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpahb.acc"] + fn vmpahb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb"] + fn vmpauhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpauhb.acc"] + fn vmpauhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf"] + fn vmpy_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.hf.hf.acc"] + fn vmpy_hf_hf_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16"] + fn vmpy_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.hf"] + fn vmpy_qf16_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf16.mix.hf"] + fn vmpy_qf16_mix_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32"] + fn vmpy_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.hf"] + fn vmpy_qf32_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.mix.hf"] + fn vmpy_qf32_mix_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.qf16"] + fn vmpy_qf32_qf16(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.qf32.sf"] + fn vmpy_qf32_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf"] + fn vmpy_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.hf.acc"] 
+ fn vmpy_sf_hf_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpy.sf.sf"] + fn vmpy_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpybus"] + fn vmpybus(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybus.acc"] + fn vmpybus_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv"] + fn vmpybusv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybusv.acc"] + fn vmpybusv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv"] + fn vmpybv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpybv.acc"] + fn vmpybv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyewuh"] + fn vmpyewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyewuh.64"] + fn vmpyewuh_64(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh"] + fn vmpyh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyh.acc"] + fn vmpyh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsat.acc"] + fn vmpyhsat_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhsrs"] + fn vmpyhsrs(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhss"] + fn vmpyhss(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyhus"] + fn vmpyhus(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhus.acc"] + fn vmpyhus_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv"] + fn vmpyhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhv.acc"] + fn vmpyhv_acc(_: 
HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyhvsrs"] + fn vmpyhvsrs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyieoh"] + fn vmpyieoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewh.acc"] + fn vmpyiewh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh"] + fn vmpyiewuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiewuh.acc"] + fn vmpyiewuh_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih"] + fn vmpyih(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyih.acc"] + fn vmpyih_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb"] + fn vmpyihb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyihb.acc"] + fn vmpyihb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiowh"] + fn vmpyiowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb"] + fn vmpyiwb(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwb.acc"] + fn vmpyiwb_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh"] + fn vmpyiwh(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwh.acc"] + fn vmpyiwh_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub"] + fn vmpyiwub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyiwub.acc"] + fn vmpyiwub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh"] + fn vmpyowh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.64.acc"] + fn vmpyowh_64_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name 
= "llvm.hexagon.V6.vmpyowh.rnd"] + fn vmpyowh_rnd(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.rnd.sacc"] + fn vmpyowh_rnd_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyowh.sacc"] + fn vmpyowh_sacc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyub"] + fn vmpyub(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyub.acc"] + fn vmpyub_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv"] + fn vmpyubv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyubv.acc"] + fn vmpyubv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh"] + fn vmpyuh(_: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuh.acc"] + fn vmpyuh_acc(_: HvxVectorPair, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhe"] + fn vmpyuhe(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhe.acc"] + fn vmpyuhe_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmpyuhv"] + fn vmpyuhv(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhv.acc"] + fn vmpyuhv_acc(_: HvxVectorPair, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vmpyuhvs"] + fn vmpyuhvs(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vmux"] + fn vmux(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgb"] + fn vnavgb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgh"] + fn vnavgh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgub"] + fn vnavgub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnavgw"] + fn vnavgw(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamth"] + fn vnormamth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnormamtw"] + fn vnormamtw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vnot"] + fn vnot(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vor"] + fn vor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeb"] + fn vpackeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackeh"] + fn vpackeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhb.sat"] + fn vpackhb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackhub.sat"] + fn vpackhub_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackob"] + fn vpackob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackoh"] + fn vpackoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwh.sat"] + fn vpackwh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpackwuh.sat"] + fn vpackwuh_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vpopcounth"] + fn vpopcounth(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqb"] + fn vprefixqb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqh"] + fn vprefixqh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vprefixqw"] + fn vprefixqw(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrdelta"] + fn vrdelta(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus"] + fn vrmpybus(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybus.acc"] + fn vrmpybus_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusi"] + fn vrmpybusi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vrmpybusi.acc"] + fn vrmpybusi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpybusv"] + fn vrmpybusv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybusv.acc"] + fn vrmpybusv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv"] + fn vrmpybv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpybv.acc"] + fn vrmpybv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub"] + fn vrmpyub(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyub.acc"] + fn vrmpyub_acc(_: HvxVector, _: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubi"] + fn vrmpyubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubi.acc"] + fn vrmpyubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrmpyubv"] + fn vrmpyubv(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrmpyubv.acc"] + fn vrmpyubv_acc(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vror"] + fn vror(_: HvxVector, _: i32) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrotr"] + fn vrotr(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhb"] + fn vroundhb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundhub"] + fn vroundhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduhub"] + fn vrounduhub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrounduwuh"] + fn vrounduwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwh"] + fn vroundwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vroundwuh"] + fn vroundwuh(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vrsadubi"] + fn vrsadubi(_: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vrsadubi.acc"] + fn vrsadubi_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsatdw"] + fn vsatdw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsathub"] + fn vsathub(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatuwuh"] + fn vsatuwuh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsatwh"] + fn vsatwh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsb"] + fn vsb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vscattermh"] + fn vscattermh(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermh.add"] + fn vscattermh_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhq"] + fn vscattermhq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw"] + fn vscattermhw(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhw.add"] + fn vscattermhw_add(_: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermhwq"] + fn vscattermhwq(_: HvxVector, _: i32, _: i32, _: HvxVectorPair, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw"] + fn vscattermw(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermw.add"] + fn vscattermw_add(_: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vscattermwq"] + fn vscattermwq(_: HvxVector, _: i32, _: i32, _: HvxVector, _: HvxVector) -> (); + #[link_name = "llvm.hexagon.V6.vsh"] + fn vsh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufeh"] + fn 
vshufeh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffb"] + fn vshuffb(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffeb"] + fn vshuffeb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffh"] + fn vshuffh(_: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffob"] + fn vshuffob(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vshuffvdd"] + fn vshuffvdd(_: HvxVector, _: HvxVector, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeb"] + fn vshufoeb(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoeh"] + fn vshufoeh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vshufoh"] + fn vshufoh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf"] + fn vsub_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.hf.hf"] + fn vsub_hf_hf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16"] + fn vsub_qf16(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf16.mix"] + fn vsub_qf16_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32"] + fn vsub_qf32(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.qf32.mix"] + fn vsub_qf32_mix(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf"] + fn vsub_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsub.sf.hf"] + fn vsub_sf_hf(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsub.sf.sf"] + fn vsub_sf_sf(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb"] + fn vsubb(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubb.dv"] + fn vsubb_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = 
"llvm.hexagon.V6.vsubbnq"] + fn vsubbnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbq"] + fn vsubbq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat"] + fn vsubbsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubbsat.dv"] + fn vsubbsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubh"] + fn vsubh(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubh.dv"] + fn vsubh_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhnq"] + fn vsubhnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhq"] + fn vsubhq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat"] + fn vsubhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubhsat.dv"] + fn vsubhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubhw"] + fn vsubhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububh"] + fn vsububh(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsububsat"] + fn vsububsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsububsat.dv"] + fn vsububsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubububb.sat"] + fn vsubububb_sat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat"] + fn vsubuhsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuhsat.dv"] + fn vsubuhsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuhw"] + fn vsubuhw(_: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubuwsat"] + fn vsubuwsat(_: 
HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubuwsat.dv"] + fn vsubuwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubw"] + fn vsubw(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubw.dv"] + fn vsubw_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vsubwnq"] + fn vsubwnq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwq"] + fn vsubwq(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat"] + fn vsubwsat(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vsubwsat.dv"] + fn vsubwsat_dv(_: HvxVectorPair, _: HvxVectorPair) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vswap"] + fn vswap(_: HvxVector, _: HvxVector, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb"] + fn vtmpyb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyb.acc"] + fn vtmpyb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus"] + fn vtmpybus(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpybus.acc"] + fn vtmpybus_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb"] + fn vtmpyhb(_: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vtmpyhb.acc"] + fn vtmpyhb_acc(_: HvxVectorPair, _: HvxVectorPair, _: i32) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackb"] + fn vunpackb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackh"] + fn vunpackh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackob"] + fn vunpackob(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackoh"] + fn vunpackoh(_: HvxVectorPair, _: HvxVector) -> HvxVectorPair; + 
#[link_name = "llvm.hexagon.V6.vunpackub"] + fn vunpackub(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vunpackuh"] + fn vunpackuh(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vxor"] + fn vxor(_: HvxVector, _: HvxVector) -> HvxVector; + #[link_name = "llvm.hexagon.V6.vzb"] + fn vzb(_: HvxVector) -> HvxVectorPair; + #[link_name = "llvm.hexagon.V6.vzh"] + fn vzh(_: HvxVector) -> HvxVectorPair; +} + +/// `Rd32=vextract(Vu32,Rs32)` +/// +/// Instruction Type: LD +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(extractw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_r_vextract_vr(vu: HvxVector, rs: i32) -> i32 { + extractw(vu, rs) +} + +/// `Vd32=hi(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(hi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_hi_w(vss: HvxVectorPair) -> HvxVector { + hi(vss) +} + +/// `Vd32=lo(Vss32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lo))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_lo_w(vss: HvxVectorPair) -> HvxVector { + lo(vss) +} + +/// `Vd32=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(lvsplatw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsplat_r(rt: i32) -> HvxVector { + lvsplatw(rt) +} + +/// `Vd32.uh=vabsdiff(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffh(vu, vv) +} + +/// `Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vabsdiff_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffub(vu, vv) +} + +/// `Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vabsdiff_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffuh(vu, vv) +} + +/// `Vd32.uw=vabsdiff(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsdiffw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vabsdiff_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vabsdiffw(vu, vv) +} + +/// `Vd32.h=vabs(Vu32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh(vu: HvxVector) -> HvxVector { + vabsh(vu) +} + +/// `Vd32.h=vabs(Vu32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vabs_vh_sat(vu: HvxVector) -> HvxVector { + vabsh_sat(vu) +} + +/// `Vd32.w=vabs(Vu32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw(vu: HvxVector) -> HvxVector { + vabsw(vu) +} + +/// `Vd32.w=vabs(Vu32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vabsw_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vabs_vw_sat(vu: HvxVector) -> HvxVector { + vabsw_sat(vu) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddb(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddb_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddh(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddh_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddhsat(vu, vv) +} + +/// `Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddhw(vu, vv) +} + +/// `Vdd32.h=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vadd_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vaddubh(vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddubsat(vu, vv) +} + +/// `Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddubsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vadd_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddubsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vadd_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduhsat(vu, vv) +} + +/// `Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vadd_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduhsat_dv(vuu, vvv) +} + 
+/// `Vdd32.w=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vadduhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadduhw(vu, vv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_add(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddw_dv(vuu, vvv) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddwsat(vu, vv) +} + +/// `Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaddwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vadd_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) 
-> HvxVectorPair { + vaddwsat_dv(vuu, vvv) +} + +/// `Vd32=valign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + valignb(vu, vv, rt) +} + +/// `Vd32=valign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(valignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_valign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + valignbi(vu, vv, iu3) +} + +/// `Vd32=vand(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vand))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_and(vu, vv) +} + +/// `Vd32.h=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vaslh(vu, rt) +} + +/// `Vd32.h=vasl(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasl_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + 
vaslhv(vu, vv) +} + +/// `Vd32.w=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vaslw(vu, rt) +} + +/// `Vx32.w+=vasl(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vaslacc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslw_acc(vx, vu, rt) +} + +/// `Vd32.w=vasl(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vaslwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasl_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaslwv(vu, vv) +} + +/// `Vd32.h=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhr(vu: HvxVector, rt: i32) -> HvxVector { + vasrh(vu, rt) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhbrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + 
vasrhbrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhubsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrhv(vu, vv) +} + +/// `Vd32.w=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwr(vu: HvxVector, rt: i32) -> HvxVector { + vasrw(vu, rt) +} + +/// `Vx32.w+=vasr(Vu32.w,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vasracc_vwvwr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrw_acc(vx, vu, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwh(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhrndsat(vu, vv, rt) +} + +/// `Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwhsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vasrwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhsat(vu, vv, rt) +} + +/// `Vd32.w=vasr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vasrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vasr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vasrwv(vu, vv) +} + +/// `Vd32=Vu32` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassign))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_equals_v(vu: HvxVector) -> HvxVector { + vassign(vu) +} + +/// `Vdd32=Vuu32` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vassignp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_equals_w(vuu: HvxVectorPair) -> HvxVectorPair { + vassignp(vuu) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgh(vu, vv) +} + +/// `Vd32.h=vavg(Vu32.h,Vv32.h):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavghrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vavg_vhvh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavghrnd(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgub))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgub(vu, vv) +} + +/// `Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgubrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vavg_vubvub_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgubrnd(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguh(vu, vv) +} + +/// `Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavguhrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vavg_vuhvuh_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguhrnd(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgw(vu, vv) +} + +/// `Vd32.w=vavg(Vu32.w,Vv32.w):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vavgwrnd))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vavg_vwvw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgwrnd(vu, vv) +} + +/// `Vd32.uh=vcl0(Vu32.uh)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcl0_vuh(vu: HvxVector) -> HvxVector { + vcl0h(vu) +} + +/// `Vd32.uw=vcl0(Vu32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcl0w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vcl0_vuw(vu: HvxVector) -> HvxVector { + vcl0w(vu) +} + +/// `Vdd32=vcombine(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vcombine))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vcombine_vv(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vcombine(vu, vv) +} + +/// `Vd32=#0` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vzero() -> HvxVector { + vd0() +} + +/// `Vd32.b=vdeal(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeal_vb(vu: HvxVector) -> HvxVector { + vdealb(vu) +} + +/// 
`Vd32.b=vdeale(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealb4w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vdeale_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdealb4w(vu, vv) +} + +/// `Vd32.h=vdeal(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdeal_vh(vu: HvxVector) -> HvxVector { + vdealh(vu) +} + +/// `Vdd32=vdeal(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdealvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vdeal_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vdealvdd(vu, vv, rt) +} + +/// `Vd32=vdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdelta(vu, vv) +} + +/// `Vd32.h=vdmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus(vu, rt) +} + +/// `Vx32.h+=vdmpy(Vu32.ub,Rt32.b)` +/// +/// 
Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vdmpyacc_vhvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.h=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpybus_dv(vuu, rt) +} + +/// `Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpybus_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vdmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpybus_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhb(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrb(vx: HvxVector, vu: 
HvxVector, rt: i32) -> HvxVector { + vdmpyhb_acc(vx, vu, rt) +} + +/// `Vdd32.w=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdmpyhb_dv(vuu, rt) +} + +/// `Vxx32.w+=vdmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhb_dv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vdmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vdmpyhb_dv_acc(vxx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whrh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhrh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vdmpyhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhrh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhrh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_whruh_sat(vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat(vuu, rt) +} + +/// `Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsuisat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwwhruh_sat(vx: HvxVector, vuu: HvxVectorPair, rt: i32) -> HvxVector { + vdmpyhsuisat_acc(vx, vuu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhruh_sat(vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat(vu, rt) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: 
SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhsusat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhruh_sat(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vdmpyhsusat_acc(vx, vu, rt) +} + +/// `Vd32.w=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpy_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat(vu, vv) +} + +/// `Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdmpyhvsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vdmpyacc_vwvhvh_sat(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpyhvsat_acc(vx, vu, vv) +} + +/// `Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsad_wuhruh(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vdsaduh(vuu, rt) +} + +/// `Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vdsaduh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vdsadacc_wuwwuhruh( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: 
i32, +) -> HvxVectorPair { + vdsaduh_acc(vxx, vuu, rt) +} + +/// `Vx32.w=vinsert(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vinsertwr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vinsert_vwr(vx: HvxVector, rt: i32) -> HvxVector { + vinsertwr(vx, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlalignb(vu, vv, rt) +} + +/// `Vd32=vlalign(Vu32,Vv32,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlalignbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vlalign_vvi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlalignbi(vu, vv, iu3) +} + +/// `Vd32.uh=vlsr(Vu32.uh,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vlsr_vuhr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrh(vu, rt) +} + +/// `Vd32.h=vlsr(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vlsr_vhvh(vu: HvxVector, 
vv: HvxVector) -> HvxVector { + vlsrhv(vu, vv) +} + +/// `Vd32.uw=vlsr(Vu32.uw,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vlsr_vuwr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrw(vu, rt) +} + +/// `Vd32.w=vlsr(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlsrwv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vlsr_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vlsrwv(vu, vv) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbr( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVector { + vlutvvb_oracc(vx, vu, vv, rt) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh))] +#[unstable(feature = "stdarch_hexagon", issue 
= "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhr( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + rt: i32, +) -> HvxVectorPair { + vlutvwh_oracc(vxx, vu, vv, rt) +} + +/// `Vd32.h=vmax(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmax_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxh(vu, vv) +} + +/// `Vd32.ub=vmax(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmax_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxub(vu, vv) +} + +/// `Vd32.uh=vmax(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmaxuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmax_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxuh(vu, vv) +} + +/// `Vd32.w=vmax(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] 
+#[cfg_attr(test, assert_instr(vmaxw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmax_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxw(vu, vv) +} + +/// `Vd32.h=vmin(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmin_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminh(vu, vv) +} + +/// `Vd32.ub=vmin(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vmin_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminub(vu, vv) +} + +/// `Vd32.uh=vmin(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmin_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminuh(vu, vv) +} + +/// `Vd32.w=vmin(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vminw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmin_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminw(vu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, 
assert_instr(vmpabus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabus(vuu, rt) +} + +/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabus_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabusv(vuu, vvv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpabuuv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubwub(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vmpabuuv(vuu, vvv) +} + +/// `Vdd32.w=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpahb(vuu, rt) +} + +/// `Vxx32.w+=vmpa(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpahb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpahb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus(vu, rt) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubrb(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpybus_acc(vxx, vu, rt) +} + +/// `Vdd32.h=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybusv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvubvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybusv_acc(vxx, vu, 
vv) +} + +/// `Vdd32.h=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpybv(vu, vv) +} + +/// `Vxx32.h+=vmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpyacc_whvbvb( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpybv_acc(vxx, vu, vv) +} + +/// `Vd32.w=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyewuh(vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhrh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh(vu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsat_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh_sat( + vxx: 
HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyhsat_acc(vxx, vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_rnd_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhsrs(vu, rt) +} + +/// `Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhss))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhrh_s1_sat(vu: HvxVector, rt: i32) -> HvxVector { + vmpyhss(vu, rt) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhus(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhus_acc(vxx, vu, vv) +} + +/// `Vdd32.w=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpy_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyhv(vu, vv) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyhvsrs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpy_vhvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyhvsrs(vu, vv) +} + +/// `Vd32.w=vmpyieo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyieoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyieoh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewh_acc(vx, vu, vv) +} + +/// `Vd32.w=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyie_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh(vu, vv) +} + +/// `Vx32.w+=vmpyie(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiewuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyieacc_vwvwvuh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiewuh_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih(vu, vv) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyih_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhvh(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyih_acc(vx, vu, vv) +} + +/// `Vd32.h=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyi_vhrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb(vu, rt) +} + +/// `Vx32.h+=vmpyi(Vu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX 
+/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyihb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vmpyiacc_vhvhrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyihb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyio(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyio_vwvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyiowh(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrb(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwb_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.h)` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyiwh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwh_acc(vx, vu, rt) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh(vu, vv) +} + +/// `Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyo_vwvh_s1_rnd_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyowh_rnd(vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_rnd_sacc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_rnd_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_rnd_sacc(vx, vu, vv) +} + +/// `Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyowh_sacc))] +#[unstable(feature = "stdarch_hexagon", 
issue = "151523")] +pub unsafe fn q6_vw_vmpyoacc_vwvwvh_s1_sat_shift( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, +) -> HvxVector { + vmpyowh_sacc(vx, vu, vv) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyub(vu, rt) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubrub( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyub_acc(vxx, vu, rt) +} + +/// `Vdd32.uh=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyubv(vu, vv) +} + +/// `Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vmpyacc_wuhvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyubv_acc(vxx, vu, vv) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhruh(vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyuh(vu, rt) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhruh( + vxx: HvxVectorPair, + vu: HvxVector, + rt: i32, +) -> HvxVectorPair { + vmpyuh_acc(vxx, vu, rt) +} + +/// `Vdd32.uw=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpy_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyuhv(vu, vv) +} + +/// `Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vmpyuhv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vmpyacc_wuwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyuhv_acc(vxx, vu, vv) +} + +/// `Vd32.h=vnavg(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnavg_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgh(vu, vv) +} + +/// 
`Vd32.b=vnavg(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vnavg_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgub(vu, vv) +} + +/// `Vd32.w=vnavg(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnavgw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnavg_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgw(vu, vv) +} + +/// `Vd32.h=vnormamt(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vnormamt_vh(vu: HvxVector) -> HvxVector { + vnormamth(vu) +} + +/// `Vd32.w=vnormamt(Vu32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnormamtw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vnormamt_vw(vu: HvxVector) -> HvxVector { + vnormamtw(vu) +} + +/// `Vd32=vnot(Vu32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vnot))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vnot_v(vu: HvxVector) -> HvxVector { + vnot(vu) +} + +/// `Vd32=vor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_or(vu, vv) +} + +/// `Vd32.b=vpacke(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacke_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeb(vu, vv) +} + +/// `Vd32.h=vpacke(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacke_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackeh(vu, vv) +} + +/// `Vd32.b=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhb_sat(vu, vv) +} + +/// `Vd32.ub=vpack(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackhub_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vpack_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackhub_sat(vu, vv) +} + +/// `Vd32.b=vpacko(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vpacko_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackob(vu, vv) +} + +/// `Vd32.h=vpacko(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpacko_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackoh(vu, vv) +} + +/// `Vd32.h=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwh_sat(vu, vv) +} + +/// `Vd32.uh=vpack(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpackwuh_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vpack_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vpackwuh_sat(vu, vv) +} + +/// `Vd32.h=vpopcount(Vu32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vpopcounth))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vpopcount_vh(vu: HvxVector) -> HvxVector { + vpopcounth(vu) +} + +/// `Vd32=vrdelta(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: 
SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrdelta))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vrdelta_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrdelta(vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubrb(vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus(vu, rt) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubrb(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpybus_acc(vx, vu, rt) +} + +/// `Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpy_wubrbi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpybusi(vuu, rt, iu1) +} + +/// `Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vrmpyacc_wwwubrbi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + 
vrmpybusi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.w=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vubvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.ub,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybusv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvubvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybusv_acc(vx, vu, vv) +} + +/// `Vd32.w=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpy_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv(vu, vv) +} + +/// `Vx32.w+=vrmpy(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpybv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vrmpyacc_vwvbvb(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpybv_acc(vx, vu, vv) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vuw_vrmpy_vubrub(vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub(vu, rt) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vrmpyub_acc(vx, vu, rt) +} + +/// `Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpy_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrmpyubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrmpyacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrmpyubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.uw=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpy_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv(vu, vv) +} + +/// `Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrmpyubv_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrmpyacc_vuwvubvub(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vrmpyubv_acc(vx, vu, vv) +} + +/// `Vd32=vror(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vror))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vror_vr(vu: HvxVector, rt: i32) -> HvxVector { + vror(vu, rt) +} + +/// `Vd32.b=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhb(vu, vv) +} + +/// `Vd32.ub=vround(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundhub(vu, vv) +} + +/// `Vd32.h=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwh(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vroundwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vroundwuh(vu, vv) +} + +/// `Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsad_wubrubi(vuu: HvxVectorPair, rt: i32, iu1: i32) -> HvxVectorPair { + vrsadubi(vuu, rt, iu1) +} + +/// `Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vrsadubi_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vrsadacc_wuwwubrubi( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, + iu1: i32, +) -> HvxVectorPair { + vrsadubi_acc(vxx, vuu, rt, iu1) +} + +/// `Vd32.ub=vsat(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsathub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsat_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsathub(vu, vv) +} + +/// `Vd32.h=vsat(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsatwh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsat_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatwh(vu, vv) +} + +/// 
`Vdd32.h=vsxt(Vu32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsxt_vb(vu: HvxVector) -> HvxVectorPair { + vsb(vu) +} + +/// `Vdd32.w=vsxt(Vu32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsxt_vh(vu: HvxVector) -> HvxVectorPair { + vsh(vu) +} + +/// `Vd32.h=vshuffe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufeh(vu, vv) +} + +/// `Vd32.b=vshuff(Vu32.b)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuff_vb(vu: HvxVector) -> HvxVector { + vshuffb(vu) +} + +/// `Vd32.b=vshuffe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffeb(vu, vv) +} + +/// `Vd32.h=vshuff(Vu32.h)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuff_vh(vu: HvxVector) -> HvxVector { + vshuffh(vu) +} + +/// `Vd32.b=vshuffo(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vshuffo_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshuffob(vu, vv) +} + +/// `Vdd32=vshuff(Vu32,Vv32,Rt8)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshuffvdd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vshuff_vvr(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vshuffvdd(vu, vv, rt) +} + +/// `Vdd32.b=vshuffoe(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vshuffoe_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeb(vu, vv) +} + +/// `Vdd32.h=vshuffoe(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoeh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vshuffoe_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vshufoeh(vu, vv) +} + +/// `Vd32.h=vshuffo(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// 
Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vshufoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vshuffo_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vshufoh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubb(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubb_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubb_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubh(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubh_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubh_dv(vuu, vvv) +} + +/// `Vd32.h=vsub(Vu32.h,Vv32.h):sat` +/// +/// Instruction Type: 
CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsub_vhvh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubhsat(vu, vv) +} + +/// `Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_whwh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubhw(vu, vv) +} + +/// `Vdd32.h=vsub(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vsub_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsububh(vu, vv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvub_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsububsat(vu, vv) +} + +/// 
`Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsububsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wub_vsub_wubwub_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsububsat_dv(vuu, vvv) +} + +/// `Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsub_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuhsat(vu, vv) +} + +/// `Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vsub_wuhwuh_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuhsat_dv(vuu, vvv) +} + +/// `Vdd32.w=vsub(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubuhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsubuhw(vu, vv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vw_vsub_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_sub(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubw_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubw_dv(vuu, vvv) +} + +/// `Vd32.w=vsub(Vu32.w,Vv32.w):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsub_vwvw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubwsat(vu, vv) +} + +/// `Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vsubwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vsub_wwww_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubwsat_dv(vuu, vvv) +} + +/// `Vdd32.h=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wbrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyb(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.b,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyb_acc))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwbrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpy_wubrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpybus(vuu, rt) +} + +/// `Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpybus_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vtmpyacc_whwubrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpybus_acc(vxx, vuu, rt) +} + +/// `Vdd32.w=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpy_whrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vtmpyhb(vuu, rt) +} + +/// `Vxx32.w+=vtmpy(Vuu32.h,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vtmpyhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vtmpyacc_wwwhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vtmpyhb_acc(vxx, vuu, rt) +} + +/// `Vdd32.h=vunpack(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 
+#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpack_vb(vu: HvxVector) -> HvxVectorPair { + vunpackb(vu) +} + +/// `Vdd32.w=vunpack(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpack_vh(vu: HvxVector) -> HvxVectorPair { + vunpackh(vu) +} + +/// `Vxx32.h|=vunpacko(Vu32.b)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackob))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vunpackoor_whvb(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackob(vxx, vu) +} + +/// `Vxx32.w|=vunpacko(Vu32.h)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackoh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vunpackoor_wwvh(vxx: HvxVectorPair, vu: HvxVector) -> HvxVectorPair { + vunpackoh(vxx, vu) +} + +/// `Vdd32.uh=vunpack(Vu32.ub)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vunpack_vub(vu: HvxVector) -> HvxVectorPair { + vunpackub(vu) +} + +/// `Vdd32.uw=vunpack(Vu32.uh)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vunpackuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vunpack_vuh(vu: HvxVector) -> HvxVectorPair { + vunpackuh(vu) +} + +/// `Vd32=vxor(Vu32,Vv32)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vxor))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vxor_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + simd_xor(vu, vv) +} + +/// `Vdd32.uh=vzxt(Vu32.ub)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuh_vzxt_vub(vu: HvxVector) -> HvxVectorPair { + vzb(vu) +} + +/// `Vdd32.uw=vzxt(Vu32.uh)` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[cfg_attr(test, assert_instr(vzh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vzxt_vuh(vu: HvxVector) -> HvxVectorPair { + vzh(vu) +} + +/// `Vd32.b=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplatb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsplat_r(rt: i32) -> HvxVector { + lvsplatb(rt) +} + +/// `Vd32.h=vsplat(Rt32)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(lvsplath))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vsplat_r(rt: i32) -> HvxVector { + lvsplath(rt) +} + +/// `Vd32.b=vadd(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vadd_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddbsat(vu, vv) +} + +/// `Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vadd_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vaddbsat_dv(vuu, vvv) +} + +/// `Vd32.h=vadd(vclb(Vu32.h),Vv32.h)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vadd_vclb_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbh(vu, vv) +} + +/// `Vd32.w=vadd(vclb(Vu32.w),Vv32.w)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddclbw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vclb_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddclbw(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.h,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, 
assert_instr(vaddhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvhvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddhw_acc(vxx, vu, vv) +} + +/// `Vxx32.h+=vadd(Vu32.ub,Vv32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddubh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vaddacc_whvubvub( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vaddubh_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vadd(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vaddububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vadd_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vaddububb_sat(vu, vv) +} + +/// `Vxx32.w+=vadd(Vu32.uh,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduhw_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vaddacc_wwvuhvuh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vadduhw_acc(vxx, vu, vv) +} + +/// `Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vadd_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadduwsat(vu, vv) +} + +/// 
`Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vadduwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vadd_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vadduwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrhbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vasr_vhvhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrhbsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasruwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhrndsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vasrwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vwvwr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasrwuhrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vlsr(Vu32.ub,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlsrb))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vlsr_vubr(vu: HvxVector, rt: i32) -> HvxVector { + vlsrb(vu, rt) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vlutvvb_nm(vu, vv, rt) +} + +/// `Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvb_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32or_vbvbvbi( + vx: HvxVector, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVector { + vlutvvb_oracci(vx, vu, vv, iu3) +} + +/// `Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)` +/// +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvvbi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vlut32_vbvbi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVector { + vlutvvbi(vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_nm))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhr_nomatch(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVectorPair { + vlutvwh_nm(vu, vv, rt) +} + +/// `Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution 
Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwh_oracci))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16or_whvbvhi( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, + iu3: i32, +) -> HvxVectorPair { + vlutvwh_oracci(vxx, vu, vv, iu3) +} + +/// `Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vlutvwhi))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vlut16_vbvhi(vu: HvxVector, vv: HvxVector, iu3: i32) -> HvxVectorPair { + vlutvwhi(vu, vv, iu3) +} + +/// `Vd32.b=vmax(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmaxb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmax_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmaxb(vu, vv) +} + +/// `Vd32.b=vmin(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vminb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vmin_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vminb(vu, vv) +} + +/// `Vdd32.w=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpa_wuhrb(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpauhb(vuu, rt) +} + +/// 
`Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpauhb_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpaacc_wwwuhrb( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpauhb_acc(vxx, vuu, rt) +} + +/// `Vdd32=vmpye(Vu32.w,Vv32.uh)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyewuh_64))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vmpye_vwvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpyewuh_64(vu, vv) +} + +/// `Vd32.w=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyi_vwrub(vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub(vu, rt) +} + +/// `Vx32.w+=vmpyi(Vu32.w,Rt32.ub)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyiwub_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vmpyiacc_vwvwrub(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyiwub_acc(vx, vu, rt) +} + +/// `Vxx32+=vmpyo(Vu32.w,Vv32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vmpyowh_64_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_w_vmpyoacc_wvwvh( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpyowh_64_acc(vxx, vu, vv) +} + +/// `Vd32.ub=vround(Vu32.uh,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduhub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vround_vuhvuh_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduhub(vu, vv) +} + +/// `Vd32.uh=vround(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vrounduwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vround_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrounduwuh(vu, vv) +} + +/// `Vd32.uh=vsat(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsatuwuh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vsat_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatuwuh(vu, vv) +} + +/// `Vd32.b=vsub(Vu32.b,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubbsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vsub_vbvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubbsat(vu, vv) +} + +/// `Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, 
assert_instr(vsubbsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wb_vsub_wbwb_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubbsat_dv(vuu, vvv) +} + +/// `Vd32.ub=vsub(Vu32.ub,Vv32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubububb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vsub_vubvb_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubububb_sat(vu, vv) +} + +/// `Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vsub_vuwvuw_sat(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsubuwsat(vu, vv) +} + +/// `Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat` +/// +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[cfg_attr(test, assert_instr(vsubuwsat_dv))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wuw_vsub_wuwwuw_sat(vuu: HvxVectorPair, vvv: HvxVectorPair) -> HvxVectorPair { + vsubuwsat_dv(vuu, vvv) +} + +/// `Vd32.b=vabs(Vu32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb(vu: HvxVector) -> HvxVector { + vabsb(vu) +} + +/// `Vd32.b=vabs(Vu32.b):sat` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vabsb_sat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vabs_vb_sat(vu: HvxVector) -> HvxVector { + vabsb_sat(vu) +} + +/// `Vx32.h+=vasl(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vaslh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vaslacc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vaslh_acc(vx, vu, rt) +} + +/// `Vx32.h+=vasr(Vu32.h,Rt32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasrh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vasracc_vhvhr(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vasrh_acc(vx, vu, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_rnd_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubrndsat(vu, vv, rt) +} + +/// `Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_vuhvuhr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruhubsat(vu, vv, rt) +} + +/// `Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat` +/// +/// Instruction 
Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vasruwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_vuwvuwr_sat(vu: HvxVector, vv: HvxVector, rt: i32) -> HvxVector { + vasruwuhsat(vu, vv, rt) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgb(vu, vv) +} + +/// `Vd32.b=vavg(Vu32.b,Vv32.b):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavgbrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vavg_vbvb_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavgbrnd(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguw(vu, vv) +} + +/// `Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vavguwrnd))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vavg_vuwvuw_rnd(vu: HvxVector, vv: HvxVector) -> HvxVector { + vavguwrnd(vu, vv) +} + +/// `Vdd32=#0` +/// +/// 
Instruction Type: MAPPING +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vdd0))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vzero() -> HvxVectorPair { + vdd0() +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvh(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermh(rs, rt, mu, vv) +} + +/// `vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armww(rs: *mut HvxVector, rt: i32, mu: i32, vvv: HvxVectorPair) { + vgathermhw(rs, rt, mu, vvv) +} + +/// `vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vgathermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_armvw(rs: *mut HvxVector, rt: i32, mu: i32, vv: HvxVector) { + vgathermw(rs, rt, mu, vv) +} + +/// `Vdd32.h=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpa_wubrub(vuu: HvxVectorPair, rt: i32) -> HvxVectorPair { + vmpabuu(vuu, rt) +} + 
+/// `Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpabuu_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wh_vmpaacc_whwubrub( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + rt: i32, +) -> HvxVectorPair { + vmpabuu_acc(vxx, vuu, rt) +} + +/// `Vxx32.w+=vmpy(Vu32.h,Rt32.h)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyh_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vmpyacc_wwvhrh(vxx: HvxVectorPair, vu: HvxVector, rt: i32) -> HvxVectorPair { + vmpyh_acc(vxx, vu, rt) +} + +/// `Vd32.uw=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpye_vuhruh(vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe(vu, rt) +} + +/// `Vx32.uw+=vmpye(Vu32.uh,Rt32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vmpyuhe_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vmpyeacc_vuwvuhruh(vx: HvxVector, vu: HvxVector, rt: i32) -> HvxVector { + vmpyuhe_acc(vx, vu, rt) +} + +/// `Vd32.b=vnavg(Vu32.b,Vv32.b)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vnavgb))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub 
unsafe fn q6_vb_vnavg_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVector { + vnavgb(vu, vv) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.h).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermh_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvhv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermh_add(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32` +/// +/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermhw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmwwv(rt: i32, mu: i32, vvv: HvxVectorPair, vw: HvxVector) { + vscattermhw_add(rt, mu, vvv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", 
target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw(rt, mu, vv, vw) +} + +/// `vscatter(Rt32,Mu2,Vv32.w).w+=Vw32` +/// +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[cfg_attr(test, assert_instr(vscattermw_add))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatteracc_rmvwv(rt: i32, mu: i32, vv: HvxVector, vw: HvxVector) { + vscattermw_add(rt, mu, vv, vw) +} + +/// `Vxx32.w=vasrinto(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VP_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vasr_into))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_vasrinto_wwvwvw( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vasr_into(vxx, vu, vv) +} + +/// `Vd32.uw=vrotr(Vu32.uw,Vv32.uw)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vrotr))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuw_vrotr_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vrotr(vu, vv) +} + +/// `Vd32.w=vsatdw(Vu32.w,Vv32.w)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[cfg_attr(test, assert_instr(vsatdw))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vsatdw_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsatdw(vu, vv) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// 
Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_h( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyhubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_h( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyhubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpy_wubwbi_v( + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10(vuu, vvv, iu2) +} + +/// `Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(v6mpyvubs10_vxx))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_ww_v6mpyacc_wwwubwbi_v( + vxx: HvxVectorPair, + vuu: HvxVectorPair, + vvv: HvxVectorPair, + iu2: i32, +) -> HvxVectorPair { + v6mpyvubs10_vxx(vxx, vuu, vvv, iu2) +} + +/// `Vd32.hf=vabs(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vabs_vhf(vu: HvxVector) -> HvxVector { + vabs_hf(vu) +} + +/// `Vd32.sf=vabs(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vabs_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vabs_vsf(vu: HvxVector) -> HvxVector { + vabs_sf(vu) +} + +/// `Vd32.qf16=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf(vu, vv) +} + +/// `Vd32.hf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16(vu, vv) +} + +/// `Vd32.qf16=vadd(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vadd_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf(vu, vv) +} + +/// `Vdd32.sf=vadd(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vadd_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vadd_sf_hf(vu, vv) +} + +/// `Vd32.sf=vadd(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vadd_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vadd_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vadd_sf_sf(vu, vv) +} + +/// `Vd32.w=vfmv(Vu32.w)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vassign_fp))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vfmv_vw(vu: HvxVector) -> HvxVector { + vassign_fp(vu) +} + +/// `Vd32.hf=Vu32.qf16` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vqf16(vu: HvxVector) -> HvxVector { + vconv_hf_qf16(vu) +} + +/// `Vd32.hf=Vuu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_hf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_wqf32(vuu: HvxVectorPair) -> HvxVector { + vconv_hf_qf32(vuu) +} + +/// `Vd32.sf=Vu32.qf32` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vconv_sf_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vqf32(vu: HvxVector) -> HvxVector { + vconv_sf_qf32(vu) +} + +/// `Vd32.b=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = 
"hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_b_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_b_hf(vu, vv) +} + +/// `Vd32.h=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_h_hf(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vb(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_b(vu) +} + +/// `Vd32.hf=vcvt(Vu32.h)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vh(vu: HvxVector) -> HvxVector { + vcvt_hf_h(vu) +} + +/// `Vd32.hf=vcvt(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_hf_sf(vu, vv) +} + +/// `Vdd32.hf=vcvt(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vcvt_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_vub(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_ub(vu) +} + +/// `Vd32.hf=vcvt(Vu32.uh)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_hf_uh))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vcvt_vuh(vu: HvxVector) -> HvxVector { + vcvt_hf_uh(vu) +} + +/// `Vdd32.sf=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vcvt_vhf(vu: HvxVector) -> HvxVectorPair { + vcvt_sf_hf(vu) +} + +/// `Vd32.ub=vcvt(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_ub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vcvt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vcvt_ub_hf(vu, vv) +} + +/// `Vd32.uh=vcvt(Vu32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vcvt_uh_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vcvt_vhf(vu: HvxVector) -> HvxVector { + vcvt_uh_hf(vu) +} + +/// `Vd32.sf=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = 
"151523")] +pub unsafe fn q6_vsf_vdmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf(vu, vv) +} + +/// `Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vdmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vdmpyacc_vsfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vdmpy_sf_hf_acc(vx, vu, vv) +} + +/// `Vd32.hf=vfmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_hf(vu, vv) +} + +/// `Vd32.sf=vfmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_sf(vu, vv) +} + +/// `Vd32.hf=vfmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfmin_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_hf(vu, vv) +} + +/// `Vd32.sf=vfmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, 
assert_instr(vfmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_sf(vu, vv) +} + +/// `Vd32.hf=vfneg(Vu32.hf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vfneg_vhf(vu: HvxVector) -> HvxVector { + vfneg_hf(vu) +} + +/// `Vd32.sf=vfneg(Vu32.sf)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vfneg_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vfneg_vsf(vu: HvxVector) -> HvxVector { + vfneg_sf(vu) +} + +/// `Vd32.hf=vmax(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmax_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_hf(vu, vv) +} + +/// `Vd32.sf=vmax(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmax_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmax_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmax_sf(vu, vv) +} + +/// `Vd32.hf=vmin(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_hf))] +#[unstable(feature = 
"stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmin_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_hf(vu, vv) +} + +/// `Vd32.sf=vmin(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmin_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmin_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmin_sf(vu, vv) +} + +/// `Vd32.hf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf(vu, vv) +} + +/// `Vx32.hf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_hf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vmpyacc_vhfvhfvhf(vx: HvxVector, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_hf_hf_acc(vx, vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] 
+#[cfg_attr(test, assert_instr(vmpy_qf16_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_hf(vu, vv) +} + +/// `Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf16_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf16_mix_hf(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_mix_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_mix_hf(vu, vv) +} + +/// `Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VX_DV +/// 
Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wqf32_vmpy_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_qf32_qf16(vu, vv) +} + +/// `Vd32.qf32=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_qf32_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpy_qf32_sf(vu, vv) +} + +/// `Vdd32.sf=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpy_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vmpy_sf_hf(vu, vv) +} + +/// `Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_hf_acc))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vmpyacc_wsfvhfvhf( + vxx: HvxVectorPair, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPair { + vmpy_sf_hf_acc(vxx, vu, vv) +} + +/// `Vd32.sf=vmpy(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vmpy_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vmpy_vsfvsf(vu: HvxVector, vv: HvxVector) -> 
HvxVector { + vmpy_sf_sf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf(vu, vv) +} + +/// `Vd32.hf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_hf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_hf_hf(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vqf16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16(vu, vv) +} + +/// `Vd32.qf16=vsub(Vu32.qf16,Vv32.hf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf16_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf16_vsub_vqf16vhf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf16_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_vqf32_vsub_vqf32vqf32(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.qf32,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_qf32_mix))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vqf32vsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_qf32_mix(vu, vv) +} + +/// `Vd32.qf32=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vqf32_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf(vu, vv) +} + +/// `Vdd32.sf=vsub(Vu32.hf,Vv32.hf)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_wsf_vsub_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vsub_sf_hf(vu, vv) +} + +/// `Vd32.sf=vsub(Vu32.sf,Vv32.sf)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[cfg_attr(test, assert_instr(vsub_sf_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_vsub_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVector { + vsub_sf_sf(vu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubrndsat))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubrndsat(vuu, vv) +} + +/// `Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvuhubsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vub_vasr_wuhvub_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvuhubsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhrndsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_rnd_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhrndsat(vuu, vv) +} + +/// `Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vasrvwuhsat))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vasr_wwvuh_sat(vuu: HvxVectorPair, vv: HvxVector) -> HvxVector { + vasrvwuhsat(vuu, vv) +} + +/// `Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv69"))] +#[cfg_attr(test, assert_instr(vmpyuhvs))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vuh_vmpy_vuhvuh_rs16(vu: HvxVector, vv: HvxVector) -> HvxVector { + vmpyuhvs(vu, vv) +} + +/// `Vd32.h=Vu32.hf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] 
+#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_h_hf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_equals_vhf(vu: HvxVector) -> HvxVector { + vconv_h_hf(vu) +} + +/// `Vd32.hf=Vu32.h` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_hf_h))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vhf_equals_vh(vu: HvxVector) -> HvxVector { + vconv_hf_h(vu) +} + +/// `Vd32.sf=Vu32.w` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_sf_w))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vsf_equals_vw(vu: HvxVector) -> HvxVector { + vconv_sf_w(vu) +} + +/// `Vd32.w=Vu32.sf` +/// +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv73"))] +#[cfg_attr(test, assert_instr(vconv_w_sf))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_equals_vsf(vu: HvxVector) -> HvxVector { + vconv_w_sf(vu) +} + +/// `Vd32=vgetqfext(Vu32.x,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(get_qfext))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vgetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + get_qfext(vu, rt) +} + +/// `Vd32.x=vsetqfext(Vu32,Rt32)` +/// +/// Instruction Type: CVI_VX +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(set_qfext))] 
+#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vsetqfext_vr(vu: HvxVector, rt: i32) -> HvxVector { + set_qfext(vu, rt) +} + +/// `Vd32.f8=vabs(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vabs_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vabs_v(vu: HvxVector) -> HvxVector { + vabs_f8(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.b)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_b))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vb(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_b(vu) +} + +/// `Vdd32.hf=vcvt2(Vu32.ub)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt2_hf_ub))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt2_vub(vu: HvxVector) -> HvxVectorPair { + vcvt2_hf_ub(vu) +} + +/// `Vdd32.hf=vcvt(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_DV +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vcvt_hf_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_whf_vcvt_v(vu: HvxVector) -> HvxVectorPair { + vcvt_hf_f8(vu) +} + +/// `Vd32.f8=vfmax(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmax_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn 
q6_v_vfmax_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmax_f8(vu, vv) +} + +/// `Vd32.f8=vfmin(Vu32.f8,Vv32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfmin_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfmin_vv(vu: HvxVector, vv: HvxVector) -> HvxVector { + vfmin_f8(vu, vv) +} + +/// `Vd32.f8=vfneg(Vu32.f8)` +/// +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv79"))] +#[cfg_attr(test, assert_instr(vfneg_f8))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vfneg_v(vu: HvxVector) -> HvxVector { + vfneg_f8(vu) +} + +/// `Qd4=and(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=and(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_and_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_and_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=not(Qs4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_not_q(qs: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_not(vandvrt( + core::mem::transmute::(qs), + -1, + )), + -1, + )) +} + +/// `Qd4=or(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=or(Qs4,!Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_or_qqn(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_or_n( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4=vsetq(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2(rt), -1)) +} + +/// `Qd4=xor(Qs4,Qt4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_xor_qq(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + pred_xor( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `if (!Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qnriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_nqpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4):nt=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv_nt(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_nt_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (Qv4) vmem(Rt32+#s4)=Vs32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VM_ST +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vmem_qriv(qv: HvxVectorPred, rt: *mut HvxVector, vs: HvxVector) { + vS32b_qpred_ai( + vandvrt(core::mem::transmute::(qv), -1), + rt, + vs, + ) +} + +/// `if (!Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b+=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condacc_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h+=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condacc_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w+=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condacc_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vaddwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vd32=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt(core::mem::transmute::(qu), rt) +} + +/// `Vx32|=vand(Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandvrt_acc(vx, core::mem::transmute::(qu), rt) +} + +/// `Qd4=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vand_vr(vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vu, rt)) +} + +/// `Qx4|=vand(Vu32,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vandor_qvr(qx: HvxVectorPred, vu: HvxVector, rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt_acc( + core::mem::transmute::(qx), + vu, + rt, + )) +} + +/// `Qd4=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eq_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(veqw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.eq(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_eqxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + veqw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vbvb(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtb(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.b,Vv32.b)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvbvb( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtb_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhvh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgth(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.h,Vv32.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhvh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgth_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vubvub(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtub(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.ub,Vv32.ub)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvubvub( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtub_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuhvuh(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuh(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uh,Vv32.uh)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuhvuh( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuh_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vuwvuw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtuw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.uw,Vv32.uw)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvuwvuw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtuw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vwvw(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtw(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.w,Vv32.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvwvw( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtw_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Vd32=vmux(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vmux_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVector { + vmux( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `if (!Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qnvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.b-=Vu32.b` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_condnac_qvbvb(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubbq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qnvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.h-=Vu32.h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_condnac_qvhvh(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubhq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (!Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qnvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwnq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `if (Qv4) Vx32.w-=Vu32.w` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_condnac_qvwvw(qv: HvxVectorPred, vx: HvxVector, vu: HvxVector) -> HvxVector { + vsubwq( + vandvrt(core::mem::transmute::(qv), -1), + vx, + vu, + ) +} + +/// `Vdd32=vswap(Qt4,Vu32,Vv32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv60"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_w_vswap_qvv(qt: HvxVectorPred, vu: HvxVector, vv: HvxVector) -> HvxVectorPair { + vswap( + vandvrt(core::mem::transmute::(qt), -1), + vu, + vv, + ) +} + +/// `Qd4=vsetq2(Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VP +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vsetq2_r(rt: i32) -> HvxVectorPred { + core::mem::transmute::(vandqrt(pred_scalar2v2(rt), -1)) +} + +/// `Qd4.b=vshuffe(Qs4.h,Qt4.h)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qb_vshuffe_qhqh(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqh( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Qd4.h=vshuffe(Qs4.w,Qt4.w)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA_DV +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_qh_vshuffe_qwqw(qs: HvxVectorPred, qt: HvxVectorPred) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + shuffeqw( + vandvrt(core::mem::transmute::(qs), -1), + vandvrt(core::mem::transmute::(qt), -1), + ), + -1, + )) +} + +/// `Vd32=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnr(qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt( + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vx32|=vand(!Qu4,Rt32)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VX_LATE +/// Execution Slots: SLOT23 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vandor_vqnr(vx: HvxVector, qu: HvxVectorPred, rt: i32) -> HvxVector { + vandnqrt_acc( + vx, + vandvrt(core::mem::transmute::(qu), -1), + rt, + ) +} + +/// `Vd32=vand(!Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qnv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvnqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `Vd32=vand(Qv4,Vu32)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv62"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_v_vand_qv(qv: HvxVectorPred, vu: HvxVector) -> HvxVector { + vandvqv( + vandvrt(core::mem::transmute::(qv), -1), + vu, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvh( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermhq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_GATHER_DV +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmww( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, +) { + vgathermhwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + ) +} + +/// `if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_GATHER +/// Execution Slots: SLOT01 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vgather_aqrmvw( + rs: *mut HvxVector, + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, +) { + vgathermwq( + rs, + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + ) +} + +/// `Vd32.b=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vb_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqb(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `Vd32.h=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vh_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqh(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `Vd32.w=prefixsum(Qv4)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VS +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_prefixsum_q(qv: HvxVectorPred) -> HvxVector { + vprefixqw(vandvrt( + core::mem::transmute::(qv), + -1, + )) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmvhv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, + vw: HvxVector, +) { + vscattermhq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + vw, + ) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_SCATTER_DV +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmwwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vvv: HvxVectorPair, + vw: HvxVector, +) { + vscattermhwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vvv, + vw, + ) +} + +/// `if (Qs4) vscatter(Rt32,Mu2,Vv32.w).w=Vw32` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_SCATTER +/// Execution Slots: SLOT0 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv65"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vscatter_qrmvwv( + qs: HvxVectorPred, + rt: i32, + mu: i32, + vv: HvxVector, + vw: HvxVector, +) { + vscattermwq( + vandvrt(core::mem::transmute::(qs), -1), + rt, + mu, + vv, + vw, + ) +} + +/// `Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv66"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_vw_vadd_vwvwq_carry_sat( + vu: HvxVector, + vv: HvxVector, + qs: HvxVectorPred, +) -> HvxVector { + vaddcarrysat( + vu, + vv, + vandvrt(core::mem::transmute::(qs), -1), + ) +} + +/// `Qd4=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vhfvhf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgthf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.hf,Vv32.hf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvhfvhf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgthf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qd4=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gt_vsfvsf(vu: HvxVector, vv: HvxVector) -> HvxVectorPred { + core::mem::transmute::(vandqrt(vgtsf(vu, vv), -1)) +} + +/// `Qx4&=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtand_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_and( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4|=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. 
+/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtor_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_or( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} + +/// `Qx4^=vcmp.gt(Vu32.sf,Vv32.sf)` +/// +/// This is a compound operation composed of multiple HVX instructions. +/// Instruction Type: CVI_VA +/// Execution Slots: SLOT0123 +#[inline(always)] +#[cfg_attr(target_arch = "hexagon", target_feature(enable = "hvxv68"))] +#[unstable(feature = "stdarch_hexagon", issue = "151523")] +pub unsafe fn q6_q_vcmp_gtxacc_qvsfvsf( + qx: HvxVectorPred, + vu: HvxVector, + vv: HvxVector, +) -> HvxVectorPred { + core::mem::transmute::(vandqrt( + vgtsf_xor( + vandvrt(core::mem::transmute::(qx), -1), + vu, + vv, + ), + -1, + )) +} diff --git a/crates/core_arch/src/lib.rs b/crates/core_arch/src/lib.rs index 039a4c4411..8a1bead7c4 100644 --- a/crates/core_arch/src/lib.rs +++ b/crates/core_arch/src/lib.rs @@ -23,6 +23,7 @@ mips_target_feature, powerpc_target_feature, loongarch_target_feature, + hexagon_target_feature, wasm_target_feature, abi_unadjusted, rtm_target_feature, diff --git a/crates/core_arch/src/mod.rs b/crates/core_arch/src/mod.rs index 3577175ae3..2483d07b23 100644 --- a/crates/core_arch/src/mod.rs +++ b/crates/core_arch/src/mod.rs @@ -320,6 +320,19 @@ pub mod arch { pub mod s390x { pub use crate::core_arch::s390x::*; } + + /// Platform-specific intrinsics for the `hexagon` platform. + /// + /// This module provides intrinsics for the Qualcomm Hexagon DSP architecture, + /// including the Hexagon Vector Extensions (HVX). + /// + /// See the [module documentation](../index.html) for more details. 
+ #[cfg(any(target_arch = "hexagon", doc))] + #[doc(cfg(target_arch = "hexagon"))] + #[unstable(feature = "stdarch_hexagon", issue = "151523")] + pub mod hexagon { + pub use crate::core_arch::hexagon::*; + } } #[cfg(any(target_arch = "x86", target_arch = "x86_64", doc))] @@ -379,3 +392,7 @@ mod loongarch64; #[cfg(any(target_arch = "s390x", doc))] #[doc(cfg(target_arch = "s390x"))] mod s390x; + +#[cfg(any(target_arch = "hexagon", doc))] +#[doc(cfg(target_arch = "hexagon"))] +mod hexagon; diff --git a/crates/stdarch-gen-hexagon/Cargo.toml b/crates/stdarch-gen-hexagon/Cargo.toml new file mode 100644 index 0000000000..397c7816f8 --- /dev/null +++ b/crates/stdarch-gen-hexagon/Cargo.toml @@ -0,0 +1,9 @@ +[package] +name = "stdarch-gen-hexagon" +version = "0.1.0" +authors = ["The Rust Project Developers"] +license = "MIT OR Apache-2.0" +edition = "2021" + +[dependencies] +regex = "1.10" diff --git a/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h b/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h new file mode 100644 index 0000000000..19309a40d6 --- /dev/null +++ b/crates/stdarch-gen-hexagon/hvx_hexagon_protos.h @@ -0,0 +1,6003 @@ +//===----------------------------------------------------------------------===// +// +// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. +// See https://llvm.org/LICENSE.txt for license information. +// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// +//===----------------------------------------------------------------------===// +// Automatically generated file, do not edit! 
+//===----------------------------------------------------------------------===// + + +#ifndef _HVX_HEXAGON_PROTOS_H_ +#define _HVX_HEXAGON_PROTOS_H_ 1 + +#ifdef __HVX__ +#if __HVX_LENGTH__ == 128 +#define __BUILTIN_VECTOR_WRAP(a) a ## _128B +#else +#define __BUILTIN_VECTOR_WRAP(a) a +#endif + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Rd32=vextract(Vu32,Rs32) + C Intrinsic Prototype: Word32 Q6_R_vextract_VR(HVX_Vector Vu, Word32 Rs) + Instruction Type: LD + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_R_vextract_VR(Vu,Rs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_extractw)(Vu,Rs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=hi(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_hi_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_hi_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_hi)(Vss) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=lo(Vss32) + C Intrinsic Prototype: HVX_Vector Q6_V_lo_W(HVX_VectorPair Vss) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_lo_W(Vss) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lo)(Vss) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_V_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatw)(Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_and_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=and(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_and_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_and_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_and_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=not(Qs4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_not_Q(HVX_VectorPred Qs) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_not_Q(Qs) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_not)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=or(Qs4,!Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_or_QQn(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_or_QQn(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_or_n)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq_R(Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vsetq_R(Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2)(Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=xor(Qs4,Qt4) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_xor_QQ(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_xor_QQ(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QnRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QnRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_nqpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ 
>= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4):nt=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV_nt(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV_nt(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_nt_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) vmem(Rt32+#s4)=Vs32 + C Intrinsic Prototype: void Q6_vmem_QRIV(HVX_VectorPred Qv, HVX_Vector* Rt, HVX_Vector Vs) + Instruction Type: CVI_VM_ST + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vmem_QRIV(Qv,Rt,Vs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vS32b_qpred_ai)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Rt,Vs) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vabsdiff(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vabsdiff_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vub_vabsdiff_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vabsdiff(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vabsdiff_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vabsdiff_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vabsdiff(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vabsdiff_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vabsdiff_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsdiffw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vabs(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vabs_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vabs(Vu32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vabs_Vh_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vh_vabs_Vh_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsh_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vabs(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vabs_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vabs(Vu32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vabs_Vw_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vabs_Vw_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsw_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vadd_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Wb_vadd_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddb_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.b+=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condacc_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.b+=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condacc_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condacc_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + 
Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vadd_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddh_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.h+=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condacc_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.h+=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condacc_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condacc_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Vh_vadd_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vadd(Vuu32.h,Vvv32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vadd_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vadd_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vadd(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vadd_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vadd_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vub_vadd_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.ub=vadd(Vuu32.ub,Vvv32.ub):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vadd_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wub_vadd_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vadd(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vadd_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vadd_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vadd(Vuu32.uh,Vvv32.uh):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vadd_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vadd_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vu32.uh,Vv32.uh) + C 
Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vadd_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vadd_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddw_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.w+=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condacc_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: if (Qv4) Vx32.w+=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condacc_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condacc_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vadd(Vuu32.w,Vvv32.w):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vadd_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vadd_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=valign(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_V_valign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=valign(Vu32,Vv32,#u3) + C Intrinsic Prototype: HVX_Vector Q6_V_valign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_valign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valignbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vand)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QR(HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vand_QR(Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32|=vand(Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_V_vandor_VQR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vand(Vu32,Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vand_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Q_vand_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)(Vu,Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vand(Vu32,Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vandor_QVR(HVX_VectorPred Qx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Q_vandor_QVR(Qx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt_acc)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasl(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasl_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vasl(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasl_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasl_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasl(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasl_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vasl(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vaslacc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vaslacc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslw_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasl(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasl_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasl_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ 
>= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VhVhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.h,Vv32.h,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Vub_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhubsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasr(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasr_VwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vasr(Vu32.w,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vasracc_VwVwR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasracc_VwVwR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrw_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vh_vasr_VwVwR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwh)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vasr(Vu32.w,Vv32.w,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwhsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VwVwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vasr(Vu32.w,Vv32.w) + C 
Intrinsic Prototype: HVX_Vector Q6_Vw_vasr_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vasr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=Vu32 + C Intrinsic Prototype: HVX_Vector Q6_V_equals_V(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_equals_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=Vuu32 + C Intrinsic Prototype: HVX_VectorPair Q6_W_equals_W(HVX_VectorPair Vuu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_equals_W(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassignp)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vavg(Vu32.h,Vv32.h):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vh_vavg_VhVh_rnd(HVX_Vector Vu, 
HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vavg_VhVh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavghrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vavg(Vu32.ub,Vv32.ub):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vub_vavg_VubVub_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vavg_VubVub_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgubrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vavg_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vavg(Vu32.uh,Vv32.uh):rnd + C Intrinsic 
Prototype: HVX_Vector Q6_Vuh_vavg_VuhVuh_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vavg_VuhVuh_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguhrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vavg(Vu32.w,Vv32.w):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vw_vavg_VwVw_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vavg_VwVw_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgwrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vcl0(Vu32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcl0_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vcl0_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0h)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vcl0(Vu32.uw) + C Intrinsic 
Prototype: HVX_Vector Q6_Vuw_vcl0_Vuw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vcl0_Vuw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcl0w)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vcombine(Vu32,Vv32) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vcombine_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vcombine_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcombine)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=#0 + C Intrinsic Prototype: HVX_Vector Q6_V_vzero() + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vd0)() +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vdeal(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeal_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vdeal_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vdeale(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vdeale_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: 
SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vdeale_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealb4w)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vdeal(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdeal_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vdeal_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vdeal(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vdeal_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vdeal_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdealvdd)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vdelta(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vdelta_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdelta)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vdmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vh_vdmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vdmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vdmpyacc_VhVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vdmpyacc_VhVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vdmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vdmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vdmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vdmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vdmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpybus_dv_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.b) + C Intrinsic 
Prototype: HVX_Vector Q6_Vw_vdmpy_VhRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vdmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vdmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vdmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vdmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vdmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhb_dv_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRh_sat(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_WhRh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwWhRh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhisat_acc)(Vx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRh_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRh_sat(Vx,Vu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsat_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vuu32.h,Rt32.uh,#1):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_WhRuh_sat(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_WhRuh_sat(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vuu32.h,Rt32.uh,#1):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwWhRuh_sat(HVX_Vector Vx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwWhRuh_sat(Vx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsuisat_acc)(Vx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Rt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhRuh_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhRuh_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Rt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhRuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: 
CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhRuh_sat(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhsusat_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vdmpy(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpy_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpy_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vdmpy(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vdmpyacc_VwVhVh_sat(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vdmpyacc_VwVhVh_sat(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpyhvsat_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vdsad(Vuu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsad_WuhRuh(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vdsad_WuhRuh(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vxx32.uw+=vdsad(Vuu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vdsadacc_WuwWuhRuh(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vdsadacc_WuwWuhRuh(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdsaduh_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVhVh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVwVw(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVbVb(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbVb(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVbVb(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtb_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVhVh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhVh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVhVh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgth_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VubVub(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVubVub(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVubVub(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVubVub(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtub_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuhVuh(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuhVuh(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuhVuh(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuh_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVuwVuw(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVuwVuw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVuwVuw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtuw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVwVw(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVwVw(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVwVw(Qx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtw_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w=vinsert(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vinsert_VwR(HVX_Vector Vx, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vinsert_VwR(Vx,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vinsertwr)(Vx,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vlalign(Vu32,Vv32,#u3) + C Intrinsic Prototype: HVX_Vector Q6_V_vlalign_VVI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vlalign_VVI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlalignbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vlsr(Vu32.uh,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vlsr_VuhR(HVX_Vector Vu, Word32 Rt) + 
Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vlsr_VuhR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlsr(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlsr_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vlsr_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vlsr(Vu32.uw,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vlsr_VuwR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vlsr_VuwR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrw)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vlsr(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vlsr_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vlsr_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrwv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR(HVX_Vector Vu, HVX_Vector Vv, 
Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbR(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbR(Vx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracc)(Vx,Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhR(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhR(Vxx,Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracc)(Vxx,Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vmax(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmax_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmax_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmax(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmax_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmax_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmax(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmax_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmax_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmax(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmax_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vmax_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ 
>= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmin(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmin_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vmin_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vmin(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vmin_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vmin_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmin(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmin_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vmin_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmin(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmin_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vmin_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if 
__HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabus_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabusv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Vvv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubWub(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_Wh_vmpa_WubWub(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuuv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpa(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubRb(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution 
Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubRb(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybus_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpyacc_WhVubVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVubVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybusv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Wh_vmpyacc_WhVbVb(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpyacc_WhVbVb(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpybv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh_sat(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh_sat(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsat_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_rnd_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_rnd_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhsrs)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Rt32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhRh_s1_sat(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhRh_s1_sat(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhss)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVuh(Vxx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhus_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpy_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpy_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpy(Vu32.h,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpy_VhVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpy_VhVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyhvsrs)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyieo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vw_vmpyieo_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyieoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieacc_VwVwVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewh_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyie_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyie_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyie(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyieacc_VwVwVuh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyieacc_VwVwVuh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiewuh_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: 
HVX_Vector Q6_Vh_vmpyi_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhVh(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhVh(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyih_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyi_VhRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyi_VhRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vmpyi(Vu32.h,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpyiacc_VhVhRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vmpyiacc_VhVhRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyihb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + 
Assembly Syntax: Vd32.w=vmpyio(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyio_VwVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyio_VwVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiowh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwb_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyo_VwVh_s1_rnd_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyo_VwVh_s1_rnd_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:rnd:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + 
+#define Q6_Vw_vmpyoacc_VwVwVh_s1_rnd_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_rnd_sacc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyo(Vu32.w,Vv32.h):<<1:sat:shift + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyoacc_VwVwVh_s1_sat_shift(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_sacc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubRub(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubRub(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyub_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Wuh_vmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uh+=vmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vmpyacc_WuhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuh_vmpyacc_WuhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyubv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhRuh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhRuh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuh_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vdd32.uw=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpy_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpy_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vmpy(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vmpyacc_WuwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vmpyacc_WuwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhv_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vmux(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vmux_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vmux_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmux)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnavg(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnavg_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define 
Q6_Vh_vnavg_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vnavg_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnavg(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnavg_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnavg_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vnormamt(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vnormamt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vnormamt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamth)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vnormamt(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vnormamt_Vw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vnormamt_Vw(Vu) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnormamtw)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vnot(Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vnot_V(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vnot_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnot)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vor)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacke(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacke_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacke_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacke(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacke_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpacke_VwVw(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vpack(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vpack_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vpack_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackhub_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vpacko(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vpacko_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vpacko_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackob)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpacko(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpacko_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vh_vpacko_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwh_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vpack(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vpack_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vpack_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpackwuh_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vpopcount(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vpopcount_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vpopcount_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vpopcounth)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vrdelta(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vrdelta_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP + 
Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vrdelta_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrdelta)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubRb(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubRb(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubRb(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubRb(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybus_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vrmpy_WubRbI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpy_WubRbI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vrmpy(Vuu32.ub,Rt32.b,#u1) + C Intrinsic 
Prototype: HVX_VectorPair Q6_Ww_vrmpyacc_WwWubRbI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vrmpyacc_WwWubRbI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VubVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VubVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.ub,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVubVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVubVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybusv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpy_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpy_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vx32.w+=vrmpy(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vrmpyacc_VwVbVb(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vrmpyacc_VwVbVb(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpybv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyub_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpy_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpy_WubRubI(Vuu,Rt,Iu1) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrmpy(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrmpyacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrmpyacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpy_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpy_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vrmpy(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrmpyacc_VuwVubVub(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vrmpyacc_VuwVubVub(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrmpyubv_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vror(Vu32,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vror_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: 
CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vror_VR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vror)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundhub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector 
Q6_Vuh_vround_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vroundwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsad_WubRubI(HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsad_WubRubI(Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi)(Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.uw+=vrsad(Vuu32.ub,Rt32.ub,#u1) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vrsadacc_WuwWubRubI(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt, Word32 Iu1) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wuw_vrsadacc_WuwWubRubI(Vxx,Vuu,Rt,Iu1) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrsadubi_acc)(Vxx,Vuu,Rt,Iu1) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsat(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsat_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsat_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsathub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 
+/* ========================================================================== + Assembly Syntax: Vd32.h=vsat(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsat_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsat_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatwh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsxt(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsxt_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsxt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsxt(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsxt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsxt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vd32.b=vshuff(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuff_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuff_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuffe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuff(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuff_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuff_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vshuffo(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vshuffo_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vshuffo_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffob)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: Vdd32=vshuff(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vshuff_VVR(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vshuff_VVR(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshuffvdd)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vshuffoe(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vshuffoe_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vshuffoe_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vshuffoe(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vshuffoe_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vshuffoe_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoeh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vshuffo(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vshuffo_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vshuffo_VhVh(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vshufoh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vsub_WbWb(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubb_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QnVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QnVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.b-=Vu32.b + C Intrinsic Prototype: HVX_Vector Q6_Vb_condnac_QVbVb(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution 
Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_condnac_QVbVb(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubh_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QnVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QnVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* 
========================================================================== + Assembly Syntax: if (Qv4) Vx32.h-=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vh_condnac_QVhVh(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_condnac_QVhVh(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsub(Vu32.h,Vv32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsub_VhVh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vsub_VhVh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vuu32.h,Vvv32.h):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_WhWh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vsub_WhWh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define 
Q6_Ww_vsub_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vsub(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vsub_VubVub(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vsub_VubVub(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVub_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVub_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.ub=vsub(Vuu32.ub,Vvv32.ub):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wub_vsub_WubWub_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wub_vsub_WubWub_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsububsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vsub(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsub_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: 
SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsub_VuhVuh_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vsub(Vuu32.uh,Vvv32.uh):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vsub_WuhWuh_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vsub_WuhWuh_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_VuhVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vsub_VuhVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuhw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w) + C Intrinsic Prototype: HVX_VectorPair 
Q6_Ww_vsub_WwWw(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubw_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (!Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QnVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QnVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwnq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: if (Qv4) Vx32.w-=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vw_condnac_QVwVw(HVX_VectorPred Qv, HVX_Vector Vx, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_condnac_QVwVw(Qv,Vx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w):sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat)(Vu,Vv) +#endif /* 
__HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vsub(Vuu32.w,Vvv32.w):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vsub_WwWw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vsub_WwWw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32=vswap(Qt4,Vu32,Vv32) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vswap_QVV(HVX_VectorPred Qt, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vswap_QVV(Qt,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vswap)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1),Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WbRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WbRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.b,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWbRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWbRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpy_WubRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpy_WubRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vtmpy(Vuu32.ub,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vtmpyacc_WhWubRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vtmpyacc_WhWubRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpybus_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vtmpy_WhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpy_WhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vtmpy(Vuu32.h,Rt32.b) + C Intrinsic Prototype: 
HVX_VectorPair Q6_Ww_vtmpyacc_WwWhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vtmpyacc_WwWhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vtmpyhb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vunpack(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpack_Vb(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpack_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vunpack(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpack_Vh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpack_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vunpacko(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vunpackoor_WhVb(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vunpackoor_WhVb(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackob)(Vxx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vxx32.w|=vunpacko(Vu32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vunpackoor_WwVh(HVX_VectorPair Vxx, HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vunpackoor_WwVh(Vxx,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackoh)(Vxx,Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uh=vunpack(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vunpack_Vub(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vunpack_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vunpack(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vunpack_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vunpack_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vunpackuh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vd32=vxor(Vu32,Vv32) + C Intrinsic Prototype: HVX_Vector Q6_V_vxor_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vxor_VV(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vxor)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: 
Vdd32.uh=vzxt(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuh_vzxt_Vub(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuh_vzxt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 60 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vzxt(Vu32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vzxt_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vzxt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vzh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 60 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplatb)(Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vsplat(Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vsplat_R(Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vsplat_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_lvsplath)(Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4=vsetq2(Rt32) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vsetq2_R(Word32 Rt) + Instruction Type: 
CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vsetq2_R(Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_pred_scalar2v2)(Rt)),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.b=vshuffe(Qs4.h,Qt4.h) + C Intrinsic Prototype: HVX_VectorPred Q6_Qb_vshuffe_QhQh(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qb_vshuffe_QhQh(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Qd4.h=vshuffe(Qs4.w,Qt4.w) + C Intrinsic Prototype: HVX_VectorPred Q6_Qh_vshuffe_QwQw(HVX_VectorPred Qs, HVX_VectorPred Qt) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Qh_vshuffe_QwQw(Qs,Qt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_shuffeqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qt),-1))),-1) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vadd(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vadd_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vb_vadd_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vadd_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wb_vadd_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddbsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarry)(Vu,Vv,Qx) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.h=vadd(vclb(Vu32.h),Vv32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vadd_vclb_VhVh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vadd_vclb_VhVh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(vclb(Vu32.w),Vv32.w) + C Intrinsic 
Prototype: HVX_Vector Q6_Vw_vadd_vclb_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_vclb_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddclbw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.h,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVhVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVhVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddhw_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vadd(Vu32.ub,Vv32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vaddacc_WhVubVub(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vaddacc_WhVubVub(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddubh_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vadd(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vadd_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vadd_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddububb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* 
========================================================================== + Assembly Syntax: Vxx32.w+=vadd(Vu32.uh,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vaddacc_WwVuhVuh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vaddacc_WwVuhVuh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduhw_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vadd_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vadd_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wuw_vadd_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vadd_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadduwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnR(HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vand_QnR(Qu,Rt) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32|=vand(!Qu4,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vandor_VQnR(HVX_Vector Vx, HVX_VectorPred Qu, Word32 Rt) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vandor_VQnR(Vx,Qu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandnqrt_acc)(Vx,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qu),-1),Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(!Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QnV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QnV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvnqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32=vand(Qv4,Vu32) + C Intrinsic Prototype: HVX_Vector Q6_V_vand_QV(HVX_VectorPred Qv, HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vand_QV(Qv,Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvqv)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1),Vu) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat + 
C Intrinsic Prototype: HVX_Vector Q6_Vb_vasr_VhVhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vasr_VhVhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrhbsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VwVwR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VwVwR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrwuhrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vlsr(Vu32.ub,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vlsr_VubR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vlsr_VubR(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlsrb)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if 
__HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_nm)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32or_VbVbVbI(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32or_VbVbVbI(Vx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvb_oracci)(Vx,Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vlut32(Vu32.b,Vv32.b,#u3) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vlut32_VbVbI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vlut32_VbVbI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvvbi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhR_nomatch(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Wh_vlut16_VbVhR_nomatch(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_nm)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16or_WhVbVhI(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16or_WhVbVhI(Vxx,Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwh_oracci)(Vxx,Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vlut16_VbVhI(HVX_Vector Vu, HVX_Vector Vv, Word32 Iu3) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wh_vlut16_VbVhI(Vu,Vv,Iu3) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlutvwhi)(Vu,Vv,Iu3) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmax(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vmax_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmax_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmaxb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vmin(Vu32.b,Vv32.b) 
+ C Intrinsic Prototype: HVX_Vector Q6_Vb_vmin_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vmin_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vminb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.w=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpa_WuhRb(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpa_WuhRb(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpa(Vuu32.uh,Rt32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpaacc_WwWuhRb(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpaacc_WwWuhRb(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhb_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32=vmpye(Vu32.w,Vv32.uh) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpye_VwVuh(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpye_VwVuh(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyewuh_64)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* 
========================================================================== + Assembly Syntax: Vd32.w=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyi_VwRub(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyi_VwRub(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vx32.w+=vmpyi(Vu32.w,Rt32.ub) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vmpyiacc_VwVwRub(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vmpyiacc_VwVwRub(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyiwub_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vxx32+=vmpyo(Vu32.w,Vv32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_W_vmpyoacc_WVwVh(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_W_vmpyoacc_WVwVh(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyowh_64_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vround(Vu32.uh,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vround_VuhVuh_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vround_VuhVuh_sat(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduhub)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vround(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vround_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vround_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrounduwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vsat(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vsat_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vsat_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatuwuh)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.b=vsub(Vu32.b,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vsub_VbVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vsub_VbVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat + C Intrinsic Prototype: HVX_VectorPair Q6_Wb_vsub_WbWb_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Wb_vsub_WbWb_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubbsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsub_VwVwQ_carry(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred* Qx) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsub_VwVwQ_carry(Vu,Vv,Qx) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubcarry)(Vu,Vv,Qx) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vsub(Vu32.ub,Vv32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vsub_VubVb_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vsub_VubVb_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubububb_sat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vsub_VuwVuw_sat(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vsub_VuwVuw_sat(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 62 +/* ========================================================================== + Assembly Syntax: Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat + C Intrinsic 
Prototype: HVX_VectorPair Q6_Wuw_vsub_WuwWuw_sat(HVX_VectorPair Vuu, HVX_VectorPair Vvv) + Instruction Type: CVI_VA_DV + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wuw_vsub_WuwWuw_sat(Vuu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsubuwsat_dv)(Vuu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 62 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb)(Vu) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vabs(Vu32.b):sat + C Intrinsic Prototype: HVX_Vector Q6_Vb_vabs_Vb_sat(HVX_Vector Vu) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vabs_Vb_sat(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabsb_sat)(Vu) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasl(Vu32.h,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vaslacc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vaslacc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaslh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h+=vasr(Vu32.h,Rt32) + C 
Intrinsic Prototype: HVX_Vector Q6_Vh_vasracc_VhVhR(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_vasracc_VhVhR(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrh_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_rnd_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_rnd_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubrndsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vu32.uh,Vv32.uh,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_VuhVuhR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_VuhVuhR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruhubsat)(Vu,Vv,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_VuwVuwR_sat(HVX_Vector Vu, HVX_Vector Vv, Word32 Rt) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_VuwVuwR_sat(Vu,Vv,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasruwuhsat)(Vu,Vv,Rt) +#endif /* 
__HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vavg(Vu32.b,Vv32.b):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vb_vavg_VbVb_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vavg_VbVb_rnd(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavgbrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vavg(Vu32.uw,Vv32.uw):rnd + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vavg_VuwVuw_rnd(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuw_vavg_VuwVuw_rnd(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vavguwrnd)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32=#0 + C Intrinsic Prototype: HVX_VectorPair Q6_W_vzero() + Instruction Type: MAPPING + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_W_vzero() __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdd0)() +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_ARMVh(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVh(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermh)(Rs,Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vv32.h).h + C Intrinsic Prototype: void Q6_vgather_AQRMVh(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVh(Rs,Qs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_ARMWw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: 
CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMWw(Rs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhw)(Rs,Rt,Mu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.h=vgather(Rt32,Mu2,Vvv32.w).h + C Intrinsic Prototype: void Q6_vgather_AQRMWw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv) + Instruction Type: CVI_GATHER_DV + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMWw(Rs,Qs,Rt,Mu,Vvv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermhwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_ARMVw(HVX_Vector* Rs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_ARMVw(Rs,Rt,Mu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermw)(Rs,Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vtmp.w=vgather(Rt32,Mu2,Vv32.w).w + C Intrinsic Prototype: void Q6_vgather_AQRMVw(HVX_Vector* Rs, HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv) + Instruction Type: CVI_GATHER + Execution Slots: SLOT01 + ========================================================================== */ + +#define Q6_vgather_AQRMVw(Rs,Qs,Rt,Mu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgathermwq)(Rs,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=vlut4(Vu32.uh,Rtt32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vlut4_VuhPh(HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vlut4_VuhPh(Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vlut4)(Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vdd32.h=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpa_WubRub(HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpa_WubRub(Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu)(Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.h+=vmpa(Vuu32.ub,Rt32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Wh_vmpaacc_WhWubRub(HVX_VectorPair Vxx, HVX_VectorPair Vuu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wh_vmpaacc_WhWubRub(Vxx,Vuu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpabuu_acc)(Vxx,Vuu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.h,Rtt32.h):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVhPh_sat(HVX_Vector Vx, HVX_Vector Vu, 
Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVhPh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpahhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmpa(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmpa_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmpa_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpauhuhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.h=vmps(Vx32.h,Vu32.uh,Rtt32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vh_vmps_VhVhVuhPuh_sat(HVX_Vector Vx, HVX_Vector Vu, Word64 Rtt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT2 + ========================================================================== */ + +#define Q6_Vh_vmps_VhVhVuhPuh_sat(Vx,Vu,Rtt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpsuhuhsat)(Vx,Vu,Rtt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=vmpy(Vu32.h,Rt32.h) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vmpyacc_WwVhRh(HVX_VectorPair Vxx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_vmpyacc_WwVhRh(Vxx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyh_acc)(Vxx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* 
========================================================================== + Assembly Syntax: Vd32.uw=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpye_VuhRuh(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpye_VuhRuh(Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe)(Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vx32.uw+=vmpye(Vu32.uh,Rt32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vmpyeacc_VuwVuhRuh(HVX_Vector Vx, HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuw_vmpyeacc_VuwVuhRuh(Vx,Vu,Rt) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhe_acc)(Vx,Vu,Rt) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=vnavg(Vu32.b,Vv32.b) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vnavg_VbVb(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_vnavg_VbVb(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vnavgb)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.b=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vb_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vb_prefixsum_Q(Qv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqb)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.h=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vh_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqh)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: Vd32.w=prefixsum(Qv4) + C Intrinsic Prototype: HVX_Vector Q6_Vw_prefixsum_Q(HVX_VectorPred Qv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_prefixsum_Q(Qv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vprefixqw)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qv),-1)) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVhV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.h).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVhV(Word32 Rt, 
Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVhV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermh_add)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vv32.h).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVhV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVhV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMWwV(Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw)(Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vvv32.w).h+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMWwV(Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMWwV(Rt,Mu,Vvv,Vw) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhw_add)(Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) vscatter(Rt32,Mu2,Vvv32.w).h=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMWwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_VectorPair Vvv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER_DV + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMWwV(Qs,Rt,Mu,Vvv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermhwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vvv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: vscatter(Rt32,Mu2,Vv32.w).w+=Vw32 + C Intrinsic Prototype: void Q6_vscatteracc_RMVwV(Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatteracc_RMVwV(Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermw_add)(Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 65 +/* ========================================================================== + Assembly Syntax: if (Qs4) 
vscatter(Rt32,Mu2,Vv32.w).w=Vw32 + C Intrinsic Prototype: void Q6_vscatter_QRMVwV(HVX_VectorPred Qs, Word32 Rt, Word32 Mu, HVX_Vector Vv, HVX_Vector Vw) + Instruction Type: CVI_SCATTER + Execution Slots: SLOT0 + ========================================================================== */ + +#define Q6_vscatter_QRMVwV(Qs,Rt,Mu,Vv,Vw) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vscattermwq)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1),Rt,Mu,Vv,Vw) +#endif /* __HEXAGON_ARCH___ >= 65 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vadd(Vu32.w,Vv32.w,Qs4):carry:sat + C Intrinsic Prototype: HVX_Vector Q6_Vw_vadd_VwVwQ_carry_sat(HVX_Vector Vu, HVX_Vector Vv, HVX_VectorPred Qs) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vadd_VwVwQ_carry_sat(Vu,Vv,Qs) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vaddcarrysat)(Vu,Vv,__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qs),-1)) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vxx32.w=vasrinto(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_vasrinto_WwVwVw(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Ww_vasrinto_WwVwVw(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasr_into)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.uw=vrotr(Vu32.uw,Vv32.uw) + C Intrinsic Prototype: HVX_Vector Q6_Vuw_vrotr_VuwVuw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vuw_vrotr_VuwVuw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vrotr)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 66 +/* ========================================================================== + Assembly Syntax: Vd32.w=vsatdw(Vu32.w,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vsatdw_VwVw(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vsatdw_VwVw(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsatdw)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 66 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_h(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_h(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10)(Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):h + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_h(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_h(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyhubs10_vxx)(Vxx,Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly 
Syntax: Vdd32.w=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpy_WubWbI_v(HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpy_WubWbI_v(Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10)(Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.w+=v6mpy(Vuu32.ub,Vvv32.b,#u2):v + C Intrinsic Prototype: HVX_VectorPair Q6_Ww_v6mpyacc_WwWubWbI_v(HVX_VectorPair Vxx, HVX_VectorPair Vuu, HVX_VectorPair Vvv, Word32 Iu2) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Ww_v6mpyacc_WwWubWbI_v(Vxx,Vuu,Vvv,Iu2) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_v6mpyvubs10_vxx)(Vxx,Vuu,Vvv,Iu2) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vabs(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vabs_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vabs_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vabs(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vabs_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vabs_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 
68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vadd(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vadd_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vadd_Vqf16Vhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf16_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.qf32,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_qf32_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vadd(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vadd(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wsf_vadd_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vadd(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vadd_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vadd_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.w=vfmv(Vu32.w) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vfmv_Vw(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vw_vfmv_Vw(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vassign_fp)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_equals_Vqf16(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vuu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Wqf32(HVX_VectorPair Vuu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Vhf_equals_Wqf32(Vuu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_qf32)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=Vu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_equals_Vqf32(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.b=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_b_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.h=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vh_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_h_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vb(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Whf_vcvt_Vb(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_b)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.h) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vh(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vcvt_Vh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_h)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vcvt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_Vub(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt_Vub(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_ub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vcvt(Vu32.uh) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vcvt_Vuh(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vhf_vcvt_Vuh(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_uh)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_sf_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vcvt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_ub_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vcvt(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vcvt_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vcvt_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_uh_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vdmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vsf_vdmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vx32.sf+=vdmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vdmpyacc_VsfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vdmpyacc_VsfVhfVhf(Vx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vdmpy_sf_hf_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfmax(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfmax(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfmin(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector 
Q6_Vhf_vfmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfmin(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vfneg(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vfneg_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vfneg_Vhf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vfneg(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vfneg_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vfneg_Vsf(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred 
Q6_Q_vcmp_gt_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVhfVhf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + 
Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVhfVhf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgthf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf)(Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_and)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_or)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVsfVsf(HVX_VectorPred Qx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVsfVsf(Qx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtsf_xor)(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx),-1),Vu,Vv)),-1) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmax(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmax_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_vmax_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmax(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmax_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_vmax_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_sf)(Vu,Vv) +#endif /* 
__HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmin(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmin_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_vmin_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmin(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmin_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VA + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_vmin_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vx32.hf+=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vmpyacc_VhfVhfVhf(HVX_Vector Vx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vmpyacc_VhfVhfVhf(Vx,Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_hf_acc)(Vx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf16_mix_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Vqf32_vmpy_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_Vqf16Vhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_mix_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.qf32=vmpy(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf32_vmpy_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wqf32_vmpy_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: 
HVX_Vector Q6_Vqf32_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf32_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_qf32_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpy_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vxx32.sf+=vmpy(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVhfVhf(HVX_VectorPair Vxx, HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpyacc_WsfVhfVhf(Vxx,Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_hf_acc)(Vxx,Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vmpy(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vmpy_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vmpy_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* 
========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.hf=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vhf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vhf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vqf16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_Vqf16Vqf16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.qf16,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_Vqf16Vhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_Vqf16Vhf(Vu,Vv) 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf16_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vqf32(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_Vqf32Vqf32(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.qf32,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_Vqf32Vsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_Vqf32Vsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_qf32_mix)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vsub(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VhfVhf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_Wsf_vsub_VhfVhf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_hf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 68 +/* ========================================================================== + Assembly Syntax: Vd32.sf=vsub(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vsf_vsub_VsfVsf(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vsf_vsub_VsfVsf(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_sf)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 68 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):rnd:sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_WuhVub_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubrndsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vasr(Vuu32.uh,Vv32.ub):sat + C Intrinsic Prototype: HVX_Vector Q6_Vub_vasr_WuhVub_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vub_vasr_WuhVub_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvuhubsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):rnd:sat + C Intrinsic Prototype: 
HVX_Vector Q6_Vuh_vasr_WwVuh_rnd_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_WwVuh_rnd_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhrndsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vasr(Vuu32.w,Vv32.uh):sat + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vasr_WwVuh_sat(HVX_VectorPair Vuu, HVX_Vector Vv) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vuh_vasr_WwVuh_sat(Vuu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vasrvwuhsat)(Vuu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 69 +/* ========================================================================== + Assembly Syntax: Vd32.uh=vmpy(Vu32.uh,Vv32.uh):>>16 + C Intrinsic Prototype: HVX_Vector Q6_Vuh_vmpy_VuhVuh_rs16(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vuh_vmpy_VuhVuh_rs16(Vu,Vv) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpyuhvs)(Vu,Vv) +#endif /* __HEXAGON_ARCH___ >= 69 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vadd(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vadd_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vadd_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* 
========================================================================== + Assembly Syntax: Vd32.h=Vu32.hf + C Intrinsic Prototype: HVX_Vector Q6_Vh_equals_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_equals_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_h_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.hf=Vu32.h + C Intrinsic Prototype: HVX_Vector Q6_Vhf_equals_Vh(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vhf_equals_Vh(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_hf_h)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.sf=Vu32.w + C Intrinsic Prototype: HVX_Vector Q6_Vsf_equals_Vw(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vsf_equals_Vw(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_sf_w)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.w=Vu32.sf + C Intrinsic Prototype: HVX_Vector Q6_Vw_equals_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_equals_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_w_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: 
Vd32.bf=vcvt(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vcvt_VsfVsf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vcvt_VsfVsf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_bf_sf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gt_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gt_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtand_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtand_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_and)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtor_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + 
========================================================================== */ + +#define Q6_Q_vcmp_gtor_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.gt(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_gtxacc_QVbfVbf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_gtxacc_QVbfVbf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt) \ + ((__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vgtbf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.bf=vmax(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vmax_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_LATE Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vmax_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmax_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vd32.bf=vmin(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_Vector Q6_Vbf_vmin_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_LATE Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vbf_vmin_VbfVbf(Vu, Vv) \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmin_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vmpy(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpy_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpy_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vxx32.sf+=vmpy(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vmpyacc_WsfVbfVbf(HVX_VectorPair + Vxx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution + Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vmpyacc_WsfVbfVbf(Vxx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_sf_bf_acc)(Vxx, Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 73 +/* ========================================================================== + Assembly Syntax: Vdd32.sf=vsub(Vu32.bf,Vv32.bf) + C Intrinsic Prototype: HVX_VectorPair Q6_Wsf_vsub_VbfVbf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Wsf_vsub_VbfVbf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_bf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 73 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32=vgetqfext(Vu32.x,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vgetqfext_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + 
========================================================================== */ + +#define Q6_V_vgetqfext_VR(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_get_qfext)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vx32|=vgetqfext(Vu32.x,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vgetqfextor_VVR(HVX_Vector Vx, + HVX_Vector Vu, Word32 Rt) Instruction Type: CVI_VX Execution Slots: + SLOT23 + ========================================================================== */ + +#define Q6_V_vgetqfextor_VVR(Vx, Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_get_qfext_oracc)(Vx, Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.x=vsetqfext(Vu32,Rt32) + C Intrinsic Prototype: HVX_Vector Q6_V_vsetqfext_VR(HVX_Vector Vu, Word32 Rt) + Instruction Type: CVI_VX + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vsetqfext_VR(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_set_qfext)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vabs(Vu32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vabs_V(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vabs_V(Vu) __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vadd(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vadd_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: 
CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vadd_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vadd_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.b=vcvt2(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vb_vcvt2_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vb_vcvt2_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_b_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt2(Vu32.b) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt2_Vb(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt2_Vb(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_hf_b)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt2(Vu32.ub) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt2_Vub(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt2_Vub(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_hf_ub)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.ub=vcvt2(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vub_vcvt2_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction 
Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vub_vcvt2_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt2_ub_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vcvt(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_Vector Q6_V_vcvt_VhfVhf(HVX_Vector Vu, HVX_Vector + Vv) Instruction Type: CVI_VX Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vcvt_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_f8_hf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vcvt(Vu32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vcvt_V(HVX_Vector Vu) + Instruction Type: CVI_VX_DV + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vcvt_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vcvt_hf_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfmax(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfmax_VV(HVX_Vector Vu, HVX_Vector Vv) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfmax_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmax_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfmin(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfmin_VV(HVX_Vector Vu, HVX_Vector Vv) + 
Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfmin_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfmin_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.f8=vfneg(Vu32.f8) + C Intrinsic Prototype: HVX_Vector Q6_V_vfneg_V(HVX_Vector Vu) + Instruction Type: CVI_VX_LATE + Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_V_vfneg_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vfneg_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32=vmerge(Vu32.x,Vv32.w) + C Intrinsic Prototype: HVX_Vector Q6_V_vmerge_VVw(HVX_Vector Vu, HVX_Vector + Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_vmerge_VVw(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmerge_qf)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vdd32.hf=vmpy(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vmpy_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vmpy_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vxx32.hf+=vmpy(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vmpyacc_WhfVV(HVX_VectorPair + 
Vxx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution + Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vmpyacc_WhfVV(Vxx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_hf_f8_acc)(Vxx, Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.hf,Rt32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_VhfRhf(HVX_Vector Vu, Word32 + Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_VhfRhf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_hf)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vmpy(Vu32.qf16,Rt32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vmpy_Vqf16Rhf(HVX_Vector Vu, + Word32 Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf16_vmpy_Vqf16Rhf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_qf16)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vmpy(Vu32.sf,Rt32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vmpy_VsfRsf(HVX_Vector Vu, Word32 + Rt) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Vqf32_vmpy_VsfRsf(Vu, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vmpy_rt_sf)(Vu, Rt) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 79 +/* ========================================================================== + 
Assembly Syntax: Vdd32.hf=vsub(Vu32.f8,Vv32.f8) + C Intrinsic Prototype: HVX_VectorPair Q6_Whf_vsub_VV(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VX_DV Execution Slots: SLOT23 + ========================================================================== */ + +#define Q6_Whf_vsub_VV(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_f8)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 79 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vabs(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vabs_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vabs_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vabs(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vabs_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vabs_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vabs(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vabs_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vabs_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + 
Assembly Syntax: Vd32.qf32=vabs(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vabs_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vabs_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vabs_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32=valign4(Vu32,Vv32,Rt8) + C Intrinsic Prototype: HVX_Vector Q6_V_valign4_VVR(HVX_Vector Vu, HVX_Vector + Vv, Word32 Rt) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_valign4_VVR(Vu, Vv, Rt) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_valign4)(Vu, Vv, Rt) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.bf=Vuu32.qf32 + C Intrinsic Prototype: HVX_Vector Q6_Vbf_equals_Wqf32(HVX_VectorPair Vuu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vbf_equals_Wqf32(Vuu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_bf_qf32)(Vuu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.f8=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_V_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_V_equals_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_f8_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly 
Syntax: Vd32.h=Vu32.hf:rnd + C Intrinsic Prototype: HVX_Vector Q6_Vh_equals_Vhf_rnd(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vh_equals_Vhf_rnd(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_h_hf_rnd)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vdd32.qf16=Vu32.f8 + C Intrinsic Prototype: HVX_VectorPair Q6_Wqf16_equals_V(HVX_Vector Vu) + Instruction Type: CVI_VP_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Wqf16_equals_V(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_f8)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=Vu32.hf + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_equals_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_equals_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=Vu32.qf16 + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_equals_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_equals_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=Vu32.qf32 + C 
Intrinsic Prototype: HVX_Vector Q6_Vqf32_equals_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_equals_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=Vu32.sf + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_equals_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_equals_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vconv_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VhfVhf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VhfVhf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_and)( \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.hf,Vv32.hf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVhfVhf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVhfVhf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqhf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qd4=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eq_VsfVsf(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VA Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eq_VsfVsf(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + 
(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf)(Vu, Vv)), -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4&=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqand_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqand_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_and)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4|=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqor_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqor_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + (__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_or)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Qx4^=vcmp.eq(Vu32.sf,Vv32.sf) + C Intrinsic Prototype: HVX_VectorPred Q6_Q_vcmp_eqxacc_QVsfVsf(HVX_VectorPred + Qx, HVX_Vector Vu, HVX_Vector Vv) Instruction Type: CVI_VA Execution + Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Q_vcmp_eqxacc_QVsfVsf(Qx, Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandqrt)( \ + 
(__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_veqsf_xor)( \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vandvrt)((Qx), -1), Vu, \ + Vv)), \ + -1) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vw_vilog2_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.w=vilog2(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vw_vilog2_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + 
+#define Q6_Vw_vilog2_Vsf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vilog2_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vneg(Vu32.hf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vneg_Vhf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vneg_Vhf(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf16_hf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vneg(Vu32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vneg_Vqf16(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vneg_Vqf16(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf16_qf16)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vneg(Vu32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vneg_Vqf32(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vneg_Vqf32(Vu) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf32_qf32)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vneg(Vu32.sf) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vneg_Vsf(HVX_Vector Vu) + Instruction Type: CVI_VS + Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vneg_Vsf(Vu) \ + 
__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vneg_qf32_sf)(Vu) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf16=vsub(Vu32.hf,Vv32.qf16) + C Intrinsic Prototype: HVX_Vector Q6_Vqf16_vsub_VhfVqf16(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf16_vsub_VhfVqf16(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_hf_mix)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#if __HVX_ARCH__ >= 81 +/* ========================================================================== + Assembly Syntax: Vd32.qf32=vsub(Vu32.sf,Vv32.qf32) + C Intrinsic Prototype: HVX_Vector Q6_Vqf32_vsub_VsfVqf32(HVX_Vector Vu, + HVX_Vector Vv) Instruction Type: CVI_VS Execution Slots: SLOT0123 + ========================================================================== */ + +#define Q6_Vqf32_vsub_VsfVqf32(Vu, Vv) \ + __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_vsub_sf_mix)(Vu, Vv) +#endif /* __HEXAGON_ARCH___ >= 81 */ + +#endif /* __HVX__ */ + +#endif diff --git a/crates/stdarch-gen-hexagon/src/main.rs b/crates/stdarch-gen-hexagon/src/main.rs new file mode 100644 index 0000000000..3cfbabfe0a --- /dev/null +++ b/crates/stdarch-gen-hexagon/src/main.rs @@ -0,0 +1,1717 @@ +//! Hexagon HVX Code Generator +//! +//! This generator creates v64.rs and v128.rs from scratch using the LLVM HVX +//! header file as the sole source of truth. It parses the C intrinsic prototypes +//! and generates Rust wrapper functions with appropriate attributes. +//! +//! The two generated files provide: +//! - v64.rs: 64-byte vector mode intrinsics (512-bit vectors) +//! - v128.rs: 128-byte vector mode intrinsics (1024-bit vectors) +//! +//! Both modules are available unconditionally, but require the appropriate +//! target features to actually use the intrinsics. +//! +//! 
Usage: +//! cd crates/stdarch-gen-hexagon +//! cargo run +//! # Output is written to ../core_arch/src/hexagon/v64.rs and v128.rs + +use regex::Regex; +use std::collections::{HashMap, HashSet}; +use std::fs::File; +use std::io::Write; +use std::path::Path; + +/// Mappings from HVX intrinsics to architecture-independent SIMD intrinsics. +/// These intrinsics have equivalent semantics and can be lowered to the generic form. +fn get_simd_intrinsic_mappings() -> HashMap<&'static str, &'static str> { + let mut map = HashMap::new(); + // Bitwise operations (element-size independent) + map.insert("vxor", "simd_xor"); + map.insert("vand", "simd_and"); + map.insert("vor", "simd_or"); + // Word (32-bit) arithmetic operations + map.insert("vaddw", "simd_add"); + map.insert("vsubw", "simd_sub"); + map +} + +/// The tracking issue number for the stdarch_hexagon feature +const TRACKING_ISSUE: &str = "151523"; + +/// HVX vector length mode +#[derive(Debug, Clone, Copy, PartialEq)] +enum VectorMode { + /// 64-byte vectors (512 bits) + V64, + /// 128-byte vectors (1024 bits) + V128, +} + +impl VectorMode { + fn bytes(&self) -> u32 { + match self { + VectorMode::V64 => 64, + VectorMode::V128 => 128, + } + } + + fn bits(&self) -> u32 { + self.bytes() * 8 + } + + fn lanes(&self) -> u32 { + self.bytes() / 4 // 32-bit lanes + } + + fn target_feature(&self) -> &'static str { + match self { + VectorMode::V64 => "hvx-length64b", + VectorMode::V128 => "hvx-length128b", + } + } +} + +/// LLVM version the header file is from (for reference) +/// Source: https://github.com/llvm/llvm-project/blob/llvmorg-22.1.0-rc1/clang/lib/Headers/hvx_hexagon_protos.h +const LLVM_VERSION: &str = "22.1.0-rc1"; + +/// Maximum HVX architecture version supported by rustc +/// Check with: rustc --target=hexagon-unknown-linux-musl --print target-features +const MAX_SUPPORTED_ARCH: u32 = 79; + +/// Local header file path (checked into the repository) +const HEADER_FILE: &str = "hvx_hexagon_protos.h"; + +/// Intrinsic 
information parsed from the LLVM header +#[derive(Debug, Clone)] +struct IntrinsicInfo { + /// The Q6_* intrinsic name (e.g., "Q6_V_vadd_VV") + q6_name: String, + /// The LLVM builtin name without prefix (e.g., "V6_vaddb") + builtin_name: String, + /// The short instruction name for assert_instr (e.g., "vaddb") + instr_name: String, + /// The assembly syntax from the comment + asm_syntax: String, + /// Instruction type + instr_type: String, + /// Execution slots + exec_slots: String, + /// Minimum HVX architecture version required + min_arch: u32, + /// Return type + return_type: RustType, + /// Parameters (name, type) + params: Vec<(String, RustType)>, + /// Whether this is a compound intrinsic (multiple builtins) + is_compound: bool, + /// For compound intrinsics: the parsed expression tree + compound_expr: Option, +} + +/// Expression tree for compound intrinsics +#[derive(Debug, Clone)] +enum CompoundExpr { + /// A call to a builtin: (builtin_name without V6_ prefix, arguments) + BuiltinCall(String, Vec), + /// A parameter reference by name + Param(String), + /// An integer literal (like -1) + IntLiteral(i32), +} + +/// Rust type mappings +#[derive(Debug, Clone, PartialEq)] +enum RustType { + HvxVector, + HvxVectorPair, + HvxVectorPred, + I32, + MutPtrHvxVector, + Unit, +} + +impl RustType { + fn from_c_type(c_type: &str) -> Option { + match c_type.trim() { + "HVX_Vector" => Some(RustType::HvxVector), + "HVX_VectorPair" => Some(RustType::HvxVectorPair), + "HVX_VectorPred" => Some(RustType::HvxVectorPred), + "Word32" => Some(RustType::I32), + "HVX_Vector*" => Some(RustType::MutPtrHvxVector), + "void" => Some(RustType::Unit), + _ => None, + } + } + + fn to_rust_str(&self) -> &'static str { + match self { + RustType::HvxVector => "HvxVector", + RustType::HvxVectorPair => "HvxVectorPair", + RustType::HvxVectorPred => "HvxVectorPred", + RustType::I32 => "i32", + RustType::MutPtrHvxVector => "*mut HvxVector", + RustType::Unit => "()", + } + } + + fn 
to_extern_str(&self) -> &'static str { + match self { + RustType::HvxVector => "HvxVector", + RustType::HvxVectorPair => "HvxVectorPair", + RustType::HvxVectorPred => "HvxVectorPred", + RustType::I32 => "i32", + RustType::MutPtrHvxVector => "*mut HvxVector", + RustType::Unit => "()", + } + } +} + +/// Parse a compound macro expression into an expression tree +fn parse_compound_expr(expr: &str) -> Option { + let expr = expr.trim(); + + // Try to match an integer literal (like -1) + if let Ok(n) = expr.parse::() { + return Some(CompoundExpr::IntLiteral(n)); + } + + // Try to match a simple parameter name (Vu, Vv, Rt, Qs, Qt, Qx, Vx, etc.) + // These are typically short identifiers in the macro + if expr.len() <= 3 + && expr.chars().all(|c| c.is_ascii_alphanumeric() || c == '_') + && !expr.contains("__") + { + return Some(CompoundExpr::Param(expr.to_lowercase())); + } + + // Check if it's wrapped in extra parens first + if expr.starts_with('(') && expr.ends_with(')') { + // Check if these parens wrap the entire expression + let inner = &expr[1..expr.len() - 1]; + // Count depth: if after removing outer parens the expression is balanced, + // the outer parens were enclosing everything + if is_balanced_parens(inner) { + // But we also need to verify these aren't part of a function call + // If the inner expression is balanced and the whole thing starts with ( + // and ends with ), it's a paren wrapper + let result = parse_compound_expr(inner); + if result.is_some() { + return result; + } + } + } + + // Try to match __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_xxx)(args) + // The args portion may contain nested calls, so we need to find the matching paren + if expr.starts_with("__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_") { + // Find the end of the builtin name (after V6_) + let prefix = "__BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_"; + let after_prefix = &expr[prefix.len()..]; + if let Some(paren_pos) = after_prefix.find(')') { + let builtin_name = 
&after_prefix[..paren_pos]; + let rest = &after_prefix[paren_pos + 1..]; // Skip the closing ) of the WRAP + // rest should now be "(args)" + if rest.starts_with('(') && rest.ends_with(')') { + let args_str = &rest[1..rest.len() - 1]; + let args = parse_compound_args(args_str)?; + return Some(CompoundExpr::BuiltinCall(builtin_name.to_string(), args)); + } + } + } + + // Try to match __builtin_HEXAGON_V6_xxx(args) without wrap + if expr.starts_with("__builtin_HEXAGON_V6_") { + let prefix = "__builtin_HEXAGON_V6_"; + let after_prefix = &expr[prefix.len()..]; + if let Some(paren_pos) = after_prefix.find('(') { + let builtin_name = &after_prefix[..paren_pos]; + let rest = &after_prefix[paren_pos..]; + if rest.starts_with('(') && rest.ends_with(')') { + let args_str = &rest[1..rest.len() - 1]; + let args = parse_compound_args(args_str)?; + return Some(CompoundExpr::BuiltinCall(builtin_name.to_string(), args)); + } + } + } + + None +} + +/// Check if parentheses are balanced in a string +fn is_balanced_parens(s: &str) -> bool { + let mut depth = 0; + for c in s.chars() { + match c { + '(' => depth += 1, + ')' => { + depth -= 1; + if depth < 0 { + return false; + } + } + _ => {} + } + } + depth == 0 +} + +/// Parse comma-separated arguments, respecting nested parentheses +fn parse_compound_args(args_str: &str) -> Option> { + let mut args = Vec::new(); + let mut current = String::new(); + let mut depth = 0; + + for c in args_str.chars() { + match c { + '(' => { + depth += 1; + current.push(c); + } + ')' => { + depth -= 1; + current.push(c); + } + ',' if depth == 0 => { + let arg = current.trim().to_string(); + if !arg.is_empty() { + args.push(parse_compound_expr(&arg)?); + } + current.clear(); + } + _ => current.push(c), + } + } + + // Don't forget the last argument + let arg = current.trim().to_string(); + if !arg.is_empty() { + args.push(parse_compound_expr(&arg)?); + } + + Some(args) +} + +/// Extract all builtin names used in a compound expression +fn 
collect_builtins_from_expr(expr: &CompoundExpr, builtins: &mut HashSet) { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + builtins.insert(name.clone()); + for arg in args { + collect_builtins_from_expr(arg, builtins); + } + } + CompoundExpr::Param(_) | CompoundExpr::IntLiteral(_) => {} + } +} + +/// Read the local HVX header file +fn read_header(crate_dir: &Path) -> Result { + let header_path = crate_dir.join(HEADER_FILE); + println!("Reading HVX header from: {}", header_path.display()); + println!(" (LLVM version: {})", LLVM_VERSION); + + std::fs::read_to_string(&header_path).map_err(|e| { + format!( + "Failed to read header file {}: {}", + header_path.display(), + e + ) + }) +} + +/// Parse a C function prototype to extract return type and parameters +fn parse_prototype(prototype: &str) -> Option<(RustType, Vec<(String, RustType)>)> { + // Pattern: ReturnType FunctionName(ParamType1 Param1, ParamType2 Param2, ...) + let proto_re = Regex::new(r"(\w+(?:\*)?)\s+Q6_\w+\(([^)]*)\)").unwrap(); + + if let Some(caps) = proto_re.captures(prototype) { + let return_type_str = caps[1].trim(); + let params_str = &caps[2]; + + let return_type = RustType::from_c_type(return_type_str)?; + + let mut params = Vec::new(); + if !params_str.trim().is_empty() { + // Pattern: Type Name or Type* Name + let param_re = Regex::new(r"(\w+\*?)\s+(\w+)").unwrap(); + for param in params_str.split(',') { + let param = param.trim(); + if let Some(pcaps) = param_re.captures(param) { + let ptype_str = pcaps[1].trim(); + let pname = pcaps[2].to_lowercase(); + if let Some(ptype) = RustType::from_c_type(ptype_str) { + params.push((pname, ptype)); + } else { + return None; // Unknown type + } + } + } + } + + Some((return_type, params)) + } else { + None + } +} + +/// Parse the LLVM header file to extract intrinsic information +fn parse_header(content: &str) -> Vec { + let mut intrinsics = Vec::new(); + + let arch_re = Regex::new(r"#if __HVX_ARCH__ >= (\d+)").unwrap(); + + // Regex to 
extract the simple builtin name from a macro body + // Match: __BUILTIN_VECTOR_WRAP(__builtin_HEXAGON_V6_xxx)(args) + let simple_builtin_re = + Regex::new(r"__BUILTIN_VECTOR_WRAP\(__builtin_HEXAGON_(\w+)\)\([^)]*\)\s*$").unwrap(); + + // Also handle builtins without VECTOR_WRAP + let simple_builtin_re2 = Regex::new(r"__builtin_HEXAGON_(\w+)\([^)]*\)\s*$").unwrap(); + + // Regex to extract Q6 name from #define + let q6_name_re = Regex::new(r"#define\s+(Q6_\w+)").unwrap(); + + // Regex to extract macro expression body + let macro_expr_re = Regex::new(r"#define\s+Q6_\w+\([^)]*\)\s+(.+)").unwrap(); + + let lines: Vec<&str> = content.lines().collect(); + let mut current_arch: u32 = 60; + let mut i = 0; + + while i < lines.len() { + // Track architecture version + if let Some(caps) = arch_re.captures(lines[i]) { + if let Ok(arch) = caps[1].parse() { + current_arch = arch; + } + } + + // Look for Assembly Syntax comment block + if lines[i].contains("Assembly Syntax:") { + let mut asm_syntax = String::new(); + let mut prototype = String::new(); + let mut instr_type = String::new(); + let mut exec_slots = String::new(); + + // Parse the comment block + let mut j = i; + while j < lines.len() && !lines[j].starts_with("#define") { + let line = lines[j]; + if line.contains("Assembly Syntax:") { + if let Some(pos) = line.find("Assembly Syntax:") { + asm_syntax = line[pos + 16..].trim().to_string(); + } + } else if line.contains("C Intrinsic Prototype:") { + if let Some(pos) = line.find("C Intrinsic Prototype:") { + prototype = line[pos + 22..].trim().to_string(); + } + } else if line.contains("Instruction Type:") { + if let Some(pos) = line.find("Instruction Type:") { + instr_type = line[pos + 17..].trim().to_string(); + } + } else if line.contains("Execution Slots:") { + if let Some(pos) = line.find("Execution Slots:") { + exec_slots = line[pos + 16..].trim().to_string(); + } + } + j += 1; + } + + // Now find the #define line + while j < lines.len() && 
!lines[j].starts_with("#define") { + j += 1; + } + + if j < lines.len() { + let define_line = lines[j]; + + // Extract Q6 name and check if it's simple or compound + if let Some(caps) = q6_name_re.captures(define_line) { + let q6_name = caps[1].to_string(); + + // Get the full macro body (handle line continuations) + let mut macro_body = define_line.to_string(); + let mut k = j; + while macro_body.trim_end().ends_with('\\') && k + 1 < lines.len() { + k += 1; + macro_body.push_str(lines[k]); + } + + // Try to extract simple builtin name + let builtin_name = simple_builtin_re + .captures(¯o_body) + .or_else(|| simple_builtin_re2.captures(¯o_body)) + .map(|bcaps| bcaps[1].to_string()); + + // Check if it's a compound intrinsic (multiple __builtin calls) + let builtin_count = macro_body.matches("__builtin_HEXAGON_").count(); + let is_compound = builtin_count > 1; + + // Parse prototype + if let Some((return_type, params)) = parse_prototype(&prototype) { + if is_compound { + // For compound intrinsics, parse the expression + // Extract the macro body after the parameter list + if let Some(expr_caps) = macro_expr_re.captures(¯o_body) { + let expr_str = expr_caps[1].trim().replace(['\n', '\\'], " "); + let expr_str = expr_str.trim(); + + if let Some(compound_expr) = parse_compound_expr(expr_str) { + // For compound intrinsics, we use the outermost builtin + // as the "primary" for the instruction name + let (primary_builtin, instr_name) = match &compound_expr { + CompoundExpr::BuiltinCall(name, _) => { + (name.clone(), name.clone()) + } + _ => continue, + }; + + intrinsics.push(IntrinsicInfo { + q6_name, + builtin_name: format!("V6_{}", primary_builtin), + instr_name, + asm_syntax, + instr_type, + exec_slots, + min_arch: current_arch, + return_type, + params, + is_compound: true, + compound_expr: Some(compound_expr), + }); + } + } + } else if let Some(builtin) = builtin_name { + // Extract short instruction name + let instr_name = builtin + .strip_prefix("V6_") + .map(|s| 
s.to_string()) + .unwrap_or_else(|| builtin.clone()); + + intrinsics.push(IntrinsicInfo { + q6_name, + builtin_name: builtin, + instr_name, + asm_syntax, + instr_type, + exec_slots, + min_arch: current_arch, + return_type, + params, + is_compound: false, + compound_expr: None, + }); + } + } + } + } + i = j; + } + i += 1; + } + + intrinsics +} + +/// Convert Q6 name to Rust function name (lowercase with underscores) +fn q6_to_rust_name(q6_name: &str) -> String { + // Q6_V_hi_W -> q6_v_hi_w + q6_name.to_lowercase() +} + +/// Generate the module documentation +fn generate_module_doc(mode: VectorMode) -> String { + format!( + r#"//! Hexagon HVX {bytes}-byte vector mode intrinsics +//! +//! This module provides intrinsics for the Hexagon Vector Extensions (HVX) +//! in {bytes}-byte vector mode ({bits}-bit vectors). +//! +//! HVX is a wide vector extension designed for high-performance signal processing. +//! [Hexagon HVX Programmer's Reference Manual](https://docs.qualcomm.com/doc/80-N2040-61) +//! +//! ## Vector Types +//! +//! In {bytes}-byte mode: +//! - `HvxVector` is {bits} bits ({bytes} bytes) containing {lanes} x 32-bit values +//! - `HvxVectorPair` is {pair_bits} bits ({pair_bytes} bytes) +//! - `HvxVectorPred` is {bits} bits ({bytes} bytes) for predicate operations +//! +//! To use this module, compile with `-C target-feature=+{target_feature}`. +//! +//! ## Architecture Versions +//! +//! Different intrinsics require different HVX architecture versions. Use the +//! appropriate target feature to enable the required version: +//! - HVX v60: `-C target-feature=+hvxv60` (basic HVX operations) +//! - HVX v62: `-C target-feature=+hvxv62` +//! - HVX v65: `-C target-feature=+hvxv65` (includes floating-point support) +//! - HVX v66: `-C target-feature=+hvxv66` +//! - HVX v68: `-C target-feature=+hvxv68` +//! - HVX v69: `-C target-feature=+hvxv69` +//! - HVX v73: `-C target-feature=+hvxv73` +//! - HVX v79: `-C target-feature=+hvxv79` +//! +//! 
Each version includes all features from previous versions. +"#, + bytes = mode.bytes(), + bits = mode.bits(), + lanes = mode.lanes(), + pair_bytes = mode.bytes() * 2, + pair_bits = mode.bits() * 2, + target_feature = mode.target_feature(), + ) +} + +/// Generate the type definitions for a specific vector mode +fn generate_types(mode: VectorMode) -> String { + let lanes = mode.lanes(); + let pair_lanes = lanes * 2; + let bits = mode.bits(); + let bytes = mode.bytes(); + let pair_bits = bits * 2; + let pair_bytes = bytes * 2; + + format!( + r#" +#![allow(non_camel_case_types)] + +#[cfg(test)] +use stdarch_test::assert_instr; + +use crate::intrinsics::simd::{{simd_add, simd_and, simd_or, simd_sub, simd_xor}}; + +// HVX type definitions for {bytes}-byte vector mode +types! {{ + #![unstable(feature = "stdarch_hexagon", issue = "{TRACKING_ISSUE}")] + + /// HVX vector type ({bits} bits / {bytes} bytes) + /// + /// This type represents a single HVX vector register containing {lanes} x 32-bit values. + pub struct HvxVector({lanes} x i32); + + /// HVX vector pair type ({pair_bits} bits / {pair_bytes} bytes) + /// + /// This type represents a pair of HVX vector registers, often used for + /// operations that produce double-width results. + pub struct HvxVectorPair({pair_lanes} x i32); + + /// HVX vector predicate type ({bits} bits / {bytes} bytes) + /// + /// This type represents a predicate vector used for conditional operations. + /// Each bit corresponds to a lane in the vector. 
+ pub struct HvxVectorPred({lanes} x i32); +}} +"#, + bytes = bytes, + bits = bits, + lanes = lanes, + pair_bits = pair_bits, + pair_bytes = pair_bytes, + pair_lanes = pair_lanes, + TRACKING_ISSUE = TRACKING_ISSUE, + ) +} + +/// Builtin signature information for extern declarations +struct BuiltinSignature { + /// The V6_ prefixed name + full_name: String, + /// The short name (without V6_) + short_name: String, + /// Return type + return_type: RustType, + /// Parameter types + param_types: Vec, +} + +/// Get known signatures for builtins used in compound operations +/// These are the helper builtins that don't have their own Q6_ wrapper +fn get_compound_helper_signatures() -> HashMap { + let mut map = HashMap::new(); + + // vandvrt: HVX_Vector -> i32 -> HVX_Vector + // Converts predicate to vector representation. LLVM uses HVX_Vector for both. + map.insert( + "vandvrt".to_string(), + BuiltinSignature { + full_name: "V6_vandvrt".to_string(), + short_name: "vandvrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + // vandqrt: HVX_Vector -> i32 -> HVX_Vector + // Converts vector representation back to predicate. LLVM uses HVX_Vector for both. 
+ map.insert( + "vandqrt".to_string(), + BuiltinSignature { + full_name: "V6_vandqrt".to_string(), + short_name: "vandqrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + // vandvrt_acc: HVX_Vector -> HVX_Vector -> i32 -> HVX_Vector + map.insert( + "vandvrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandvrt_acc".to_string(), + short_name: "vandvrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // vandqrt_acc: HVX_Vector -> HVX_Vector -> i32 -> HVX_Vector + map.insert( + "vandqrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandqrt_acc".to_string(), + short_name: "vandqrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // pred_and: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_and".to_string(), + BuiltinSignature { + full_name: "V6_pred_and".to_string(), + short_name: "pred_and".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_and_n: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_and_n".to_string(), + BuiltinSignature { + full_name: "V6_pred_and_n".to_string(), + short_name: "pred_and_n".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_or: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_or".to_string(), + BuiltinSignature { + full_name: "V6_pred_or".to_string(), + short_name: "pred_or".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_or_n: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_or_n".to_string(), + BuiltinSignature { + full_name: "V6_pred_or_n".to_string(), + 
short_name: "pred_or_n".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_xor: HVX_Vector -> HVX_Vector -> HVX_Vector + map.insert( + "pred_xor".to_string(), + BuiltinSignature { + full_name: "V6_pred_xor".to_string(), + short_name: "pred_xor".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // pred_not: HVX_Vector -> HVX_Vector + map.insert( + "pred_not".to_string(), + BuiltinSignature { + full_name: "V6_pred_not".to_string(), + short_name: "pred_not".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector], + }, + ); + + // pred_scalar2: i32 -> HVX_Vector + map.insert( + "pred_scalar2".to_string(), + BuiltinSignature { + full_name: "V6_pred_scalar2".to_string(), + short_name: "pred_scalar2".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::I32], + }, + ); + + // Conditional store operations + map.insert( + "vS32b_qpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_qpred_ai".to_string(), + short_name: "vS32b_qpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nqpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_nqpred_ai".to_string(), + short_name: "vS32b_nqpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nt_qpred_ai".to_string(), + BuiltinSignature { + full_name: "V6_vS32b_nt_qpred_ai".to_string(), + short_name: "vS32b_nt_qpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vS32b_nt_nqpred_ai".to_string(), + 
BuiltinSignature { + full_name: "V6_vS32b_nt_nqpred_ai".to_string(), + short_name: "vS32b_nt_nqpred_ai".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::MutPtrHvxVector, + RustType::HvxVector, + ], + }, + ); + + // Conditional accumulation operations + for (suffix, _elem) in [("b", "byte"), ("h", "halfword"), ("w", "word")] { + // vaddbq, vaddhq, vaddwq + map.insert( + format!("vadd{}q", suffix), + BuiltinSignature { + full_name: format!("V6_vadd{}q", suffix), + short_name: format!("vadd{}q", suffix), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + // vaddbnq, vaddhnq, vaddwnq + map.insert( + format!("vadd{}nq", suffix), + BuiltinSignature { + full_name: format!("V6_vadd{}nq", suffix), + short_name: format!("vadd{}nq", suffix), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + + // Comparison operations with accumulation + // veqb_and, veqb_or, veqb_xor, etc. 
+ for elem in ["b", "h", "w", "ub", "uh", "uw"] { + for op in ["and", "or", "xor"] { + // veq*_and, veq*_or, veq*_xor + map.insert( + format!("veq{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_veq{}_{}", elem, op), + short_name: format!("veq{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + // vgt*_and, vgt*_or, vgt*_xor + map.insert( + format!("vgt{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_vgt{}_{}", elem, op), + short_name: format!("vgt{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + } + + // Floating-point comparison operations (hf = half-float, sf = single-float) + for elem in ["hf", "sf"] { + // Basic comparison: vgt* + map.insert( + format!("vgt{}", elem), + BuiltinSignature { + full_name: format!("V6_vgt{}", elem), + short_name: format!("vgt{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + for op in ["and", "or", "xor"] { + // vgt*_and, vgt*_or, vgt*_xor + map.insert( + format!("vgt{}_{}", elem, op), + BuiltinSignature { + full_name: format!("V6_vgt{}_{}", elem, op), + short_name: format!("vgt{}_{}", elem, op), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + } + + // Prefix operations with predicate + for elem in ["b", "h", "w"] { + map.insert( + format!("vprefixq{}", elem), + BuiltinSignature { + full_name: format!("V6_vprefixq{}", elem), + short_name: format!("vprefixq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector], + }, + ); + } + + // Scatter operations with predicate + map.insert( + "vscattermhq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermhq".to_string(), + 
short_name: "vscattermhq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vscattermhwq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermhwq".to_string(), + short_name: "vscattermhwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVectorPair, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vscattermwq".to_string(), + BuiltinSignature { + full_name: "V6_vscattermwq".to_string(), + short_name: "vscattermwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // Add with carry saturation + map.insert( + "vaddcarrysat".to_string(), + BuiltinSignature { + full_name: "V6_vaddcarrysat".to_string(), + short_name: "vaddcarrysat".to_string(), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // Gather operations with predicate + map.insert( + "vgathermhq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermhq".to_string(), + short_name: "vgathermhq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + ], + }, + ); + + map.insert( + "vgathermhwq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermhwq".to_string(), + short_name: "vgathermhwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVectorPair, + ], + }, + ); + + map.insert( + "vgathermwq".to_string(), + BuiltinSignature { + full_name: "V6_vgathermwq".to_string(), + short_name: 
"vgathermwq".to_string(), + return_type: RustType::Unit, + param_types: vec![ + RustType::MutPtrHvxVector, + RustType::HvxVector, + RustType::I32, + RustType::I32, + RustType::HvxVector, + ], + }, + ); + + // Basic comparison operations (without accumulation) + for elem in ["b", "h", "w", "ub", "uh", "uw"] { + // vgt* - greater than + map.insert( + format!("vgt{}", elem), + BuiltinSignature { + full_name: format!("V6_vgt{}", elem), + short_name: format!("vgt{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + // veq* - equal + map.insert( + format!("veq{}", elem), + BuiltinSignature { + full_name: format!("V6_veq{}", elem), + short_name: format!("veq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + } + + // Conditional subtraction operations (vsub*q, vsub*nq) + for elem in ["b", "h", "w"] { + map.insert( + format!("vsub{}q", elem), + BuiltinSignature { + full_name: format!("V6_vsub{}q", elem), + short_name: format!("vsub{}q", elem), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + map.insert( + format!("vsub{}nq", elem), + BuiltinSignature { + full_name: format!("V6_vsub{}nq", elem), + short_name: format!("vsub{}nq", elem), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + } + + // vmux - vector mux (select based on predicate) + map.insert( + "vmux".to_string(), + BuiltinSignature { + full_name: "V6_vmux".to_string(), + short_name: "vmux".to_string(), + return_type: RustType::HvxVector, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // vswap - vector swap based on predicate + map.insert( + "vswap".to_string(), + BuiltinSignature { + full_name: "V6_vswap".to_string(), + short_name: 
"vswap".to_string(), + return_type: RustType::HvxVectorPair, + param_types: vec![ + RustType::HvxVector, + RustType::HvxVector, + RustType::HvxVector, + ], + }, + ); + + // shuffeq operations - take vectors (internal pred representation) and return vector + for elem in ["h", "w"] { + map.insert( + format!("shuffeq{}", elem), + BuiltinSignature { + full_name: format!("V6_shuffeq{}", elem), + short_name: format!("shuffeq{}", elem), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + } + + // Predicate AND with vector operations + map.insert( + "vandvqv".to_string(), + BuiltinSignature { + full_name: "V6_vandvqv".to_string(), + short_name: "vandvqv".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + map.insert( + "vandvnqv".to_string(), + BuiltinSignature { + full_name: "V6_vandvnqv".to_string(), + short_name: "vandvnqv".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector], + }, + ); + + // vandnqrt and vandnqrt_acc + map.insert( + "vandnqrt".to_string(), + BuiltinSignature { + full_name: "V6_vandnqrt".to_string(), + short_name: "vandnqrt".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::I32], + }, + ); + + map.insert( + "vandnqrt_acc".to_string(), + BuiltinSignature { + full_name: "V6_vandnqrt_acc".to_string(), + short_name: "vandnqrt_acc".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::HvxVector, RustType::HvxVector, RustType::I32], + }, + ); + + // pred_scalar2v2 + map.insert( + "pred_scalar2v2".to_string(), + BuiltinSignature { + full_name: "V6_pred_scalar2v2".to_string(), + short_name: "pred_scalar2v2".to_string(), + return_type: RustType::HvxVector, + param_types: vec![RustType::I32], + }, + ); + + map +} + +/// Generate extern declarations for all intrinsics for a specific vector mode +fn 
generate_extern_block(intrinsics: &[IntrinsicInfo], mode: VectorMode) -> String { + let mut output = String::new(); + + // Collect unique builtins to avoid duplicates + let mut seen_builtins: HashSet<String> = HashSet::new(); + let mut decls: Vec<(String, String, RustType, Vec<RustType>)> = Vec::new(); + + // First, add simple intrinsics + for info in intrinsics.iter().filter(|i| !i.is_compound) { + if seen_builtins.contains(&info.builtin_name) { + continue; + } + seen_builtins.insert(info.builtin_name.clone()); + + let param_types: Vec<RustType> = info.params.iter().map(|(_, t)| t.clone()).collect(); + decls.push(( + info.builtin_name.clone(), + info.instr_name.clone(), + info.return_type.clone(), + param_types, + )); + } + + // Then, collect all builtins used in compound expressions + let helper_sigs = get_compound_helper_signatures(); + let mut compound_builtins: HashSet<String> = HashSet::new(); + + for info in intrinsics.iter().filter(|i| i.is_compound) { + if let Some(ref expr) = info.compound_expr { + collect_builtins_from_expr(expr, &mut compound_builtins); + } + } + + // Add compound helper builtins + let mut missing_builtins = Vec::new(); + for builtin_name in compound_builtins { + let full_name = format!("V6_{}", builtin_name); + if seen_builtins.contains(&full_name) { + continue; + } + seen_builtins.insert(full_name.clone()); + + if let Some(sig) = helper_sigs.get(&builtin_name) { + decls.push(( + sig.full_name.clone(), + sig.short_name.clone(), + sig.return_type.clone(), + sig.param_types.clone(), + )); + } else { + missing_builtins.push(builtin_name); + } + } + + // Report missing builtins (for development purposes) + if !missing_builtins.is_empty() { + eprintln!("Warning: Missing helper signatures for compound builtins:"); + for name in &missing_builtins { + eprintln!(" - {}", name); + } + } + + // Sort by builtin name for consistent output + decls.sort_by(|a, b| a.0.cmp(&b.0)); + + // Generate intrinsic declarations for the specified mode + output.push_str(&format!( + "// LLVM 
intrinsic declarations for {}-byte vector mode\n", + mode.bytes() + )); + output.push_str("#[allow(improper_ctypes)]\n"); + output.push_str("unsafe extern \"unadjusted\" {\n"); + + for (builtin_name, instr_name, return_type, param_types) in &decls { + let base_link = builtin_name.replace('_', "."); + // 128-byte mode uses .128B suffix, 64-byte mode doesn't + let link_name = if builtin_name.starts_with("V6_") && mode == VectorMode::V128 { + format!("llvm.hexagon.{}.128B", base_link) + } else { + format!("llvm.hexagon.{}", base_link) + }; + + let params_str = if param_types.is_empty() { + String::new() + } else { + param_types + .iter() + .map(|t| format!("_: {}", t.to_extern_str())) + .collect::<Vec<_>>() + .join(", ") + }; + + let return_str = if *return_type == RustType::Unit { + " -> ()".to_string() + } else { + format!(" -> {}", return_type.to_extern_str()) + }; + + output.push_str(&format!( + " #[link_name = \"{}\"]\n fn {}({}){};\n", + link_name, instr_name, params_str, return_str + )); + } + + output.push_str("}\n"); + output +} + +/// Generate Rust code for a compound expression +/// `params` maps parameter names to their types in the function signature +/// Get the type of an expression +fn get_expr_type( + expr: &CompoundExpr, + params: &HashMap<String, RustType>, + helper_sigs: &HashMap<String, BuiltinSignature>, +) -> Option<RustType> { + match expr { + CompoundExpr::BuiltinCall(name, _) => { + helper_sigs.get(name).map(|sig| sig.return_type.clone()) + } + CompoundExpr::Param(name) => params.get(name).cloned(), + CompoundExpr::IntLiteral(_) => Some(RustType::I32), + } +} + +fn generate_compound_expr_code( + expr: &CompoundExpr, + params: &HashMap<String, RustType>, + helper_sigs: &HashMap<String, BuiltinSignature>, +) -> String { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + // Get the expected parameter types for this builtin + let expected_types = helper_sigs + .get(name) + .map(|sig| sig.param_types.clone()) + .unwrap_or_default(); + + let args_code: Vec<String> = args + .iter() + .enumerate() + .map(|(i, arg)| { + let arg_code = 
generate_compound_expr_code(arg, params, helper_sigs); + + // Check if we need to transmute this argument + let expected_type = expected_types.get(i); + let actual_type = get_expr_type(arg, params, helper_sigs); + + // If the builtin expects HvxVector but the arg is HvxVectorPred, transmute + if expected_type == Some(&RustType::HvxVector) + && actual_type == Some(RustType::HvxVectorPred) + { + format!( + "core::mem::transmute::<HvxVectorPred, HvxVector>({})", + arg_code + ) + } else { + arg_code + } + }) + .collect(); + format!("{}({})", name, args_code.join(", ")) + } + CompoundExpr::Param(name) => name.clone(), + CompoundExpr::IntLiteral(n) => n.to_string(), + } +} + +/// Get the primary instruction name from a compound expression (innermost significant op) +fn get_compound_primary_instr(expr: &CompoundExpr) -> Option<String> { + match expr { + CompoundExpr::BuiltinCall(name, args) => { + // For vandqrt wrapper, look inside + if name == "vandqrt" && !args.is_empty() { + if let Some(inner) = get_compound_primary_instr(&args[0]) { + return Some(inner); + } + } + // For store operations, use the store name + if name.starts_with("vS32b") { + return Some(name.clone()); + } + // For conditional accumulation, use the add name + if name.starts_with("vadd") && (name.ends_with("q") || name.ends_with("nq")) { + return Some(name.clone()); + } + // For predicate operations + if name.starts_with("pred_") { + return Some(name.clone()); + } + // For comparison operations with accumulation + if (name.starts_with("veq") || name.starts_with("vgt")) + && (name.ends_with("_and") || name.ends_with("_or") || name.ends_with("_xor")) + { + return Some(name.clone()); + } + Some(name.clone()) + } + _ => None, + } +} + +/// Get override implementations for specific compound intrinsics. +/// Some C macros rely on implicit type conversions that don't work with +/// our stricter Rust types, so we provide corrected implementations. 
+fn get_compound_overrides() -> HashMap<&'static str, &'static str> { + let mut map = HashMap::new(); + + // Q6_V_vand_QR: takes pred, returns vec + // Use transmute to convert pred to vec for LLVM, call vandvrt + map.insert( + "Q6_V_vand_QR", + "vandvrt(core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)", + ); + + // Q6_V_vandor_VQR: takes vec and pred, returns vec + map.insert( + "Q6_V_vandor_VQR", + "vandvrt_acc(vx, core::mem::transmute::<HvxVectorPred, HvxVector>(qu), rt)", + ); + + // Q6_Q_vand_VR: takes vec, returns pred + map.insert( + "Q6_Q_vand_VR", + "core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt(vu, rt))", + ); + + // Q6_Q_vandor_QVR: takes pred and vec, returns pred + map.insert( + "Q6_Q_vandor_QVR", + "core::mem::transmute::<HvxVector, HvxVectorPred>(vandqrt_acc(core::mem::transmute::<HvxVectorPred, HvxVector>(qx), vu, rt))", + ); + + map +} + +/// Generate wrapper functions for all intrinsics +fn generate_functions(intrinsics: &[IntrinsicInfo]) -> String { + let mut output = String::new(); + let simd_mappings = get_simd_intrinsic_mappings(); + + // Generate simple intrinsics + for info in intrinsics.iter().filter(|i| !i.is_compound) { + let rust_name = q6_to_rust_name(&info.q6_name); + + // Generate doc comment + output.push_str(&format!("/// `{}`\n", info.asm_syntax)); + output.push_str("///\n"); + output.push_str(&format!("/// Instruction Type: {}\n", info.instr_type)); + output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); + + // Generate attributes + output.push_str("#[inline(always)]\n"); + output.push_str(&format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", + info.min_arch + )); + + // Check if we should use simd intrinsic instead + let use_simd = simd_mappings.get(info.instr_name.as_str()); + + // assert_instr uses the original instruction name + output.push_str(&format!( + "#[cfg_attr(test, assert_instr({}))]\n", + info.instr_name + )); + + output.push_str(&format!( + "#[unstable(feature = \"stdarch_hexagon\", issue = \"{}\")]\n", + TRACKING_ISSUE + )); + + // Generate function signature + let 
params_str = info + .params + .iter() + .map(|(name, ty)| format!("{}: {}", name, ty.to_rust_str())) + .collect::<Vec<_>>() + .join(", "); + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + output.push_str(&format!( + "pub unsafe fn {}({}){} {{\n", + rust_name, params_str, return_str + )); + + // Generate function body + let args_str = info + .params + .iter() + .map(|(name, _)| name.as_str()) + .collect::<Vec<_>>() + .join(", "); + + if let Some(simd_fn) = use_simd { + // Use architecture-independent simd intrinsic + output.push_str(&format!(" {}({})\n", simd_fn, args_str)); + } else { + // Use the LLVM intrinsic + output.push_str(&format!(" {}({})\n", info.instr_name, args_str)); + } + + output.push_str("}\n\n"); + } + + // Generate compound intrinsics + let helper_sigs = get_compound_helper_signatures(); + let overrides = get_compound_overrides(); + for info in intrinsics.iter().filter(|i| i.is_compound) { + if let Some(ref compound_expr) = info.compound_expr { + let rust_name = q6_to_rust_name(&info.q6_name); + + // Get the primary instruction for assert_instr + let _primary_instr = get_compound_primary_instr(compound_expr) + .unwrap_or_else(|| info.instr_name.clone()); + + // Generate doc comment + output.push_str(&format!("/// `{}`\n", info.asm_syntax)); + output.push_str("///\n"); + output.push_str( + "/// This is a compound operation composed of multiple HVX instructions.\n", + ); + if !info.instr_type.is_empty() { + output.push_str(&format!("/// Instruction Type: {}\n", info.instr_type)); + } + if !info.exec_slots.is_empty() { + output.push_str(&format!("/// Execution Slots: {}\n", info.exec_slots)); + } + + // Generate attributes + output.push_str("#[inline(always)]\n"); + output.push_str(&format!( + "#[cfg_attr(target_arch = \"hexagon\", target_feature(enable = \"hvxv{}\"))]\n", + info.min_arch + )); + + // For compound ops, we skip assert_instr since they emit multiple 
instructions + // output.push_str(&format!( + // "#[cfg_attr(test, assert_instr({}))]\n", + // primary_instr + // )); + + output.push_str(&format!( + "#[unstable(feature = \"stdarch_hexagon\", issue = \"{}\")]\n", + TRACKING_ISSUE + )); + + // Generate function signature + let params_str = info + .params + .iter() + .map(|(name, ty)| format!("{}: {}", name, ty.to_rust_str())) + .collect::<Vec<_>>() + .join(", "); + + let return_str = if info.return_type == RustType::Unit { + String::new() + } else { + format!(" -> {}", info.return_type.to_rust_str()) + }; + + output.push_str(&format!( + "pub unsafe fn {}({}){} {{\n", + rust_name, params_str, return_str + )); + + // Check if we have an override for this intrinsic + let body = if let Some(override_body) = overrides.get(info.q6_name.as_str()) { + override_body.to_string() + } else { + // Build param type map for expression code generation + let param_types: HashMap<String, RustType> = info.params.iter().cloned().collect(); + // Generate function body from compound expression + let expr_body = + generate_compound_expr_code(compound_expr, &param_types, &helper_sigs); + + // Check if we need to transmute the result + let expr_return_type = get_expr_type(compound_expr, &param_types, &helper_sigs); + if info.return_type == RustType::HvxVectorPred + && expr_return_type == Some(RustType::HvxVector) + { + format!( + "core::mem::transmute::<HvxVector, HvxVectorPred>({})", + expr_body + ) + } else { + expr_body + } + }; + output.push_str(&format!(" {}\n", body)); + + output.push_str("}\n\n"); + } + } + + output +} + +/// Generate a module file for a specific vector mode +fn generate_module_file( + intrinsics: &[IntrinsicInfo], + output_path: &Path, + mode: VectorMode, +) -> Result<(), String> { + let mut output = + File::create(output_path).map_err(|e| format!("Failed to create output: {}", e))?; + + writeln!(output, "{}", generate_module_doc(mode)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_types(mode)).map_err(|e| e.to_string())?; + writeln!(output, "{}", 
generate_extern_block(intrinsics, mode)).map_err(|e| e.to_string())?; + writeln!(output, "{}", generate_functions(intrinsics)).map_err(|e| e.to_string())?; + + // Ensure file is flushed before running rustfmt + drop(output); + + // Run rustfmt on the generated file + let status = std::process::Command::new("rustfmt") + .arg(output_path) + .status() + .map_err(|e| format!("Failed to run rustfmt: {}", e))?; + + if !status.success() { + return Err("rustfmt failed".to_string()); + } + + Ok(()) +} + +fn main() -> Result<(), String> { + println!("=== Hexagon HVX Code Generator ===\n"); + + // Get the crate directory first (needed for both reading header and writing output) + let crate_dir = std::env::var("CARGO_MANIFEST_DIR") + .map(std::path::PathBuf::from) + .unwrap_or_else(|_| std::env::current_dir().unwrap()); + + // Read and parse the local LLVM header + println!("Step 1: Reading LLVM HVX header..."); + let header_content = read_header(&crate_dir)?; + println!(" Read {} bytes", header_content.len()); + + println!("\nStep 2: Parsing intrinsic definitions..."); + let all_intrinsics = parse_header(&header_content); + println!(" Found {} intrinsic definitions", all_intrinsics.len()); + + // Filter out intrinsics requiring architecture versions not yet supported by rustc + let intrinsics: Vec<_> = all_intrinsics + .into_iter() + .filter(|i| i.min_arch <= MAX_SUPPORTED_ARCH) + .collect(); + let filtered_count = intrinsics.len(); + println!( + " Filtered to {} intrinsics (max supported: hvxv{})", + filtered_count, MAX_SUPPORTED_ARCH + ); + + // Count simple vs compound + let simple_count = intrinsics.iter().filter(|i| !i.is_compound).count(); + let compound_count = intrinsics.iter().filter(|i| i.is_compound).count(); + println!(" Simple intrinsics: {}", simple_count); + println!(" Compound intrinsics: {}", compound_count); + + // Print some sample intrinsics for verification + println!("\n Sample simple intrinsics:"); + for info in intrinsics.iter().filter(|i| 
!i.is_compound).take(5) { + println!( + " {} -> {} ({})", + info.q6_name, info.builtin_name, info.asm_syntax + ); + } + + println!("\n Sample compound intrinsics:"); + for info in intrinsics.iter().filter(|i| i.is_compound).take(5) { + println!(" {} ({})", info.q6_name, info.asm_syntax); + } + + // Count architecture versions + let mut arch_counts: HashMap = HashMap::new(); + for info in &intrinsics { + *arch_counts.entry(info.min_arch).or_insert(0) += 1; + } + println!("\n By architecture version:"); + let mut archs: Vec<_> = arch_counts.iter().collect(); + archs.sort_by_key(|(k, _)| *k); + for (arch, count) in archs { + println!(" HVX v{}: {} intrinsics", arch, count); + } + + // Generate output files + let hexagon_dir = crate_dir.join("../core_arch/src/hexagon"); + + // Generate v64.rs (64-byte vector mode) + let v64_path = hexagon_dir.join("v64.rs"); + println!("\nStep 3: Generating v64.rs (64-byte mode)..."); + generate_module_file(&intrinsics, &v64_path, VectorMode::V64)?; + println!(" Output: {}", v64_path.display()); + + // Generate v128.rs (128-byte vector mode) + let v128_path = hexagon_dir.join("v128.rs"); + println!("\nStep 4: Generating v128.rs (128-byte mode)..."); + generate_module_file(&intrinsics, &v128_path, VectorMode::V128)?; + println!(" Output: {}", v128_path.display()); + + println!("\n=== Results ==="); + println!( + " Generated {} simple wrapper functions per module", + simple_count + ); + println!( + " Generated {} compound wrapper functions per module", + compound_count + ); + println!( + " Total: {} functions per module", + simple_count + compound_count + ); + println!(" Output files: v64.rs, v128.rs"); + + Ok(()) +} diff --git a/examples/Cargo.toml b/examples/Cargo.toml index 61451edee8..c4fc4c7e37 100644 --- a/examples/Cargo.toml +++ b/examples/Cargo.toml @@ -23,6 +23,11 @@ path = "hex.rs" name = "connect5" path = "connect5.rs" +# Hexagon-only: requires --target hexagon-unknown-linux-musl +[[bin]] +name = "gaussian" +path = 
"gaussian.rs" + [[example]] name = "wasm" crate-type = ["cdylib"] diff --git a/examples/gaussian.rs b/examples/gaussian.rs new file mode 100644 index 0000000000..dea16f797a --- /dev/null +++ b/examples/gaussian.rs @@ -0,0 +1,351 @@ +//! Hexagon HVX Gaussian 3x3 blur example +//! +//! This example demonstrates the use of Hexagon HVX intrinsics to implement +//! a 3x3 Gaussian blur filter on unsigned 8-bit images. +//! +//! The 3x3 Gaussian kernel is: +//! 1 2 1 +//! 2 4 2 / 16 +//! 1 2 1 +//! +//! This is a separable filter: `[1 2 1]^T * [1 2 1] / 16`. +//! +//! On Hexagon targets, this implementation uses `HvxVectorPair` for widening +//! arithmetic to achieve full precision in the Gaussian computation, avoiding +//! the approximation errors of byte-averaging approaches. On other targets, +//! it runs a reference implementation in pure Rust. +//! +//! # Building and Running (Hexagon) +//! +//! To build (requires Hexagon toolchain): +//! +//! RUSTFLAGS="-C target-feature=+hvxv62,+hvx-length128b \ +//! -C linker=hexagon-unknown-linux-musl-clang" \ +//! cargo +nightly build -p stdarch_examples --bin gaussian \ +//! --target hexagon-unknown-linux-musl \ +//! -Zbuild-std -Zbuild-std-features=llvm-libunwind +//! +//! To run under QEMU: +//! +//! qemu-hexagon -L /target/hexagon-unknown-linux-musl \ +//! target/hexagon-unknown-linux-musl/debug/gaussian +//! +//! # Building and Running (Other targets) +//! +//! 
cargo +nightly run -p stdarch_examples --bin gaussian + +#![cfg_attr(target_arch = "hexagon", feature(stdarch_hexagon))] +#![cfg_attr(target_arch = "hexagon", feature(hexagon_target_feature))] +#![allow( + unsafe_op_in_unsafe_fn, + clippy::unwrap_used, + clippy::print_stdout, + clippy::missing_docs_in_private_items, + clippy::cast_possible_wrap, + clippy::cast_ptr_alignment +)] + +/// Image width - must be multiple of HVX vector length on Hexagon +const WIDTH: usize = 256; +const HEIGHT: usize = 16; + +// ============================================================================ +// Hexagon HVX implementation +// ============================================================================ + +#[cfg(target_arch = "hexagon")] +mod hvx { + #[cfg(not(target_feature = "hvx-length128b"))] + use core_arch::arch::hexagon::v64::*; + #[cfg(target_feature = "hvx-length128b")] + use core_arch::arch::hexagon::v128::*; + + /// Vector length in bytes for HVX 128-byte mode + #[cfg(target_feature = "hvx-length128b")] + const VLEN: usize = 128; + + /// Vector length in bytes for HVX 64-byte mode + #[cfg(not(target_feature = "hvx-length128b"))] + const VLEN: usize = 64; + + /// Vertical 1-2-1 filter pass using HvxVectorPair widening arithmetic + /// + /// Computes: dst[x] = (row_above[x] + 2*center[x] + row_below[x] + 2) >> 2 + /// + /// Uses HvxVectorPair to widen u8 to u16 for precise arithmetic, avoiding + /// the rounding errors of byte-averaging approximations. 
+ /// + /// # Safety + /// + /// - `src` must point to the center row with valid data at -stride and +stride + /// - `dst` must point to a valid output buffer for `width` bytes + /// - `width` must be a multiple of VLEN + /// - All pointers must be HVX-aligned (128-byte for 128B mode) + #[target_feature(enable = "hvxv62")] + unsafe fn vertical_121_pass(src: *const u8, stride: isize, width: usize, dst: *mut u8) { + let inp0 = src.offset(-stride) as *const HvxVector; + let inp1 = src as *const HvxVector; + let inp2 = src.offset(stride) as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + for i in 0..n_chunks { + let above = *inp0.add(i); + let center = *inp1.add(i); + let below = *inp2.add(i); + + // Widen above + below to 16-bit using HvxVectorPair + // q6_wh_vadd_vubvub: adds two u8 vectors, producing u16 results in a pair + let above_plus_below: HvxVectorPair = q6_wh_vadd_vubvub(above, below); + + // Widen center * 2 (add center to itself) + let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(center, center); + + // Add them: (above + below) + (center * 2) = above + 2*center + below + let sum: HvxVectorPair = q6_wh_vadd_whwh(above_plus_below, center_x2); + + // Extract high and low vectors from the pair (each contains u16 values) + let sum_lo = q6_v_lo_w(sum); // Lower 64 elements as i16 + let sum_hi = q6_v_hi_w(sum); // Upper 64 elements as i16 + + // Arithmetic right shift by 2 (divide by 4) with rounding + // Add 2 for rounding before shift: (sum + 2) >> 2 + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation: takes hi and lo halfword vectors, + // saturates to u8, and interleaves them back to original order + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + + *outp.add(i) = result; + } 
+ } + + /// Horizontal 1-2-1 filter pass using HvxVectorPair widening arithmetic + /// + /// Computes: dst[x] = (src[x-1] + 2*src[x] + src[x+1] + 2) >> 2 + /// + /// Uses `valign` and `vlalign` to shift vectors by 1 byte for neighbor access, + /// then HvxVectorPair for precise widening arithmetic. + /// + /// # Safety + /// + /// - `src` and `dst` must point to valid buffers of `width` bytes + /// - `width` must be a multiple of VLEN + /// - All pointers must be HVX-aligned + #[target_feature(enable = "hvxv62")] + unsafe fn horizontal_121_pass(src: *const u8, width: usize, dst: *mut u8) { + let inp = src as *const HvxVector; + let outp = dst as *mut HvxVector; + + let n_chunks = width / VLEN; + let mut prev = q6_v_vzero(); + + for i in 0..n_chunks { + let curr = *inp.add(i); + let next = if i + 1 < n_chunks { + *inp.add(i + 1) + } else { + q6_v_vzero() + }; + + // Left neighbor (x-1): shift curr right by 1 byte, filling from prev + let left = q6_v_vlalign_vvr(curr, prev, 1); + + // Right neighbor (x+1): shift curr left by 1 byte, filling from next + let right = q6_v_valign_vvr(next, curr, 1); + + // Widen left + right to 16-bit + let left_plus_right: HvxVectorPair = q6_wh_vadd_vubvub(left, right); + + // Widen center * 2 + let center_x2: HvxVectorPair = q6_wh_vadd_vubvub(curr, curr); + + // Add: left + 2*center + right + let sum: HvxVectorPair = q6_wh_vadd_whwh(left_plus_right, center_x2); + + // Extract high and low vectors + let sum_lo = q6_v_lo_w(sum); + let sum_hi = q6_v_hi_w(sum); + + // Arithmetic right shift by 2 with rounding + let two = q6_vh_vsplat_r(2); + let sum_lo_rounded = q6_vh_vadd_vhvh(sum_lo, two); + let sum_hi_rounded = q6_vh_vadd_vhvh(sum_hi, two); + let shifted_lo = q6_vh_vasr_vhvh(sum_lo_rounded, two); + let shifted_hi = q6_vh_vasr_vhvh(sum_hi_rounded, two); + + // Pack back to u8 with saturation + let result = q6_vub_vsat_vhvh(shifted_hi, shifted_lo); + + *outp.add(i) = result; + + prev = curr; + } + } + + /// Apply Gaussian 3x3 blur to an 
entire image using separable filtering + /// + /// Two-pass approach: + /// 1. Vertical pass: apply 1-2-1 filter across rows + /// 2. Horizontal pass: apply 1-2-1 filter across columns + /// + /// Combined effect: 3x3 Gaussian kernel [1 2 1; 2 4 2; 1 2 1] / 16 + /// + /// # Safety + /// + /// - `src` and `dst` must point to valid image buffers of `stride * height` bytes + /// - `tmp` must point to a valid temporary buffer of `width` bytes, HVX-aligned + /// - `width` must be a multiple of VLEN and >= VLEN + /// - `stride` must be >= `width` + /// - All buffers must be HVX-aligned (128-byte for 128B mode) + #[target_feature(enable = "hvxv62")] + pub unsafe fn gaussian3x3u8( + src: *const u8, + stride: usize, + width: usize, + height: usize, + dst: *mut u8, + tmp: *mut u8, + ) { + let stride_i = stride as isize; + + // Process interior rows (skip first and last which lack vertical neighbors) + for y in 1..height - 1 { + let row_src = src.offset(y as isize * stride_i); + let row_dst = dst.offset(y as isize * stride_i); + + // Pass 1: vertical 1-2-1 into tmp + vertical_121_pass(row_src, stride_i, width, tmp); + + // Pass 2: horizontal 1-2-1 from tmp into dst + horizontal_121_pass(tmp, width, row_dst); + } + } +} + +// ============================================================================ +// Reference implementation (works on all targets) +// ============================================================================ + +/// Reference implementation of Gaussian 3x3 blur +/// +/// Kernel: +/// 1 2 1 +/// 2 4 2 / 16 +/// 1 2 1 +fn gaussian3x3u8_reference(src: &[u8], stride: usize, width: usize, height: usize, dst: &mut [u8]) { + for y in 1..height - 1 { + for x in 1..width - 1 { + // Compute column sums (vertical 1-2-1 weights) + let mut col = [0u32; 3]; + for i in 0..3 { + col[i] = 1 * src[(y - 1) * stride + x - 1 + i] as u32 + + 2 * src[y * stride + x - 1 + i] as u32 + + 1 * src[(y + 1) * stride + x - 1 + i] as u32; + } + // Apply horizontal 1-2-1 weights and 
normalize + // (1*col[0] + 2*col[1] + 1*col[2] + 8) / 16 + dst[y * stride + x] = ((1 * col[0] + 2 * col[1] + 1 * col[2] + 8) >> 4) as u8; + } + } +} + +/// Generate deterministic test pattern +fn generate_test_pattern(buf: &mut [u8], width: usize, height: usize) { + for y in 0..height { + for x in 0..width { + buf[y * width + x] = ((x + y * 7) % 256) as u8; + } + } +} + +// ============================================================================ +// Main: runs HVX + reference on Hexagon, reference-only on other targets +// ============================================================================ + +#[cfg(target_arch = "hexagon")] +fn main() { + // Aligned buffers for HVX + #[repr(align(128))] + struct AlignedBuf<const N: usize>([u8; N]); + + let mut src = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); + let mut dst_hvx = AlignedBuf::<{ WIDTH * HEIGHT }>([0u8; WIDTH * HEIGHT]); + let mut tmp = AlignedBuf::<{ WIDTH }>([0u8; WIDTH]); + let mut dst_ref = vec![0u8; WIDTH * HEIGHT]; + + // Generate test pattern + generate_test_pattern(&mut src.0, WIDTH, HEIGHT); + + // Run HVX implementation + unsafe { + hvx::gaussian3x3u8( + src.0.as_ptr(), + WIDTH, + WIDTH, + HEIGHT, + dst_hvx.0.as_mut_ptr(), + tmp.0.as_mut_ptr(), + ); + } + + // Run reference + gaussian3x3u8_reference(&src.0, WIDTH, WIDTH, HEIGHT, &mut dst_ref); + + // Verify HVX matches reference (allowing small rounding differences) + let mut max_diff = 0i32; + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; + let diff = (dst_hvx.0[idx] as i32 - dst_ref[idx] as i32).abs(); + max_diff = max_diff.max(diff); + // Allow up to 1 LSB difference due to rounding + assert!( + diff <= 1, + "HVX differs from reference at ({}, {}): hvx={}, ref={}, diff={}", + x, + y, + dst_hvx.0[idx], + dst_ref[idx], + diff + ); + } + } + + println!( + "Gaussian 3x3 HVX test passed! 
Max difference from reference: {}", + max_diff + ); +} + +#[cfg(not(target_arch = "hexagon"))] +fn main() { + let mut src = vec![0u8; WIDTH * HEIGHT]; + let mut dst = vec![0u8; WIDTH * HEIGHT]; + + // Generate test pattern + generate_test_pattern(&mut src, WIDTH, HEIGHT); + + // Run reference implementation + gaussian3x3u8_reference(&src, WIDTH, WIDTH, HEIGHT, &mut dst); + + // Verify output is non-trivial (blurred values differ from input) + let mut changed = 0; + for y in 1..HEIGHT - 1 { + for x in 1..WIDTH - 1 { + let idx = y * WIDTH + x; + if src[idx] != dst[idx] { + changed += 1; + } + } + } + + println!( + "Gaussian 3x3 reference test passed! {} pixels changed by blur", + changed + ); +}