author     phyBrackets <singh.shivamsingh2003@gmail.com>  2022-02-13 00:04:01 +0530
committer  phyBrackets <singh.shivamsingh2003@gmail.com>  2022-02-13 00:04:01 +0530
commit     038a71a62bf3f5d7428a7b70d02035ba997b0404 (patch)
tree       3d5cd437527223cc23bec370e631abedc535f19a
parent     4f72fbab3d1456be8e14185443b5271df21eb995 (diff)
parent     84bb14599f14b590edb2c1e45b5548af3554e551 (diff)
Merge branch 'main' of https://github.com/llvm/llvm-project into arcpatch-D119364
-rw-r--r--clang-tools-extra/clang-doc/Serialize.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/bugprone/VirtualNearMissCheck.cpp4
-rw-r--r--clang-tools-extra/clang-tidy/cppcoreguidelines/ProBoundsConstantArrayIndexCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/llvmlibc/CalleeNamespaceCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/performance/NoexceptMoveConstructorCheck.cpp2
-rw-r--r--clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp29
-rw-r--r--clang-tools-extra/clang-tidy/readability/FunctionCognitiveComplexityCheck.cpp3
-rw-r--r--clang-tools-extra/clang-tidy/readability/RedundantControlFlowCheck.cpp2
-rw-r--r--clang/docs/ClangFormatStyleOptions.rst123
-rw-r--r--clang/docs/ReleaseNotes.rst8
-rw-r--r--clang/include/clang/Driver/Options.td5
-rw-r--r--clang/include/clang/Format/Format.h123
-rw-r--r--clang/lib/CodeGen/CGClass.cpp32
-rw-r--r--clang/lib/CodeGen/CGExpr.cpp43
-rw-r--r--clang/lib/CodeGen/CodeGenModule.cpp11
-rw-r--r--clang/lib/Format/ContinuationIndenter.cpp80
-rw-r--r--clang/lib/Format/Format.cpp44
-rw-r--r--clang/lib/Format/FormatToken.h16
-rw-r--r--clang/lib/Format/TokenAnnotator.cpp95
-rw-r--r--clang/lib/Format/UnwrappedLineParser.cpp372
-rw-r--r--clang/lib/Format/UnwrappedLineParser.h22
-rw-r--r--clang/lib/Sema/SemaCast.cpp2
-rw-r--r--clang/lib/Sema/SemaChecking.cpp2
-rw-r--r--clang/lib/Sema/SemaCoroutine.cpp2
-rw-r--r--clang/test/Analysis/CFContainers-invalid.c2
-rw-r--r--clang/test/Analysis/CGColorSpace.c4
-rw-r--r--clang/test/Analysis/Checkers/RunLoopAutoreleaseLeakChecker.m28
-rw-r--r--clang/test/Analysis/DeallocUseAfterFreeErrors.m2
-rw-r--r--clang/test/Analysis/Inputs/ctu-other.c2
-rw-r--r--clang/test/Analysis/NSContainers.m16
-rw-r--r--clang/test/Analysis/NSString.m24
-rw-r--r--clang/test/Analysis/NSWindow.m8
-rw-r--r--clang/test/Analysis/NoReturn.m8
-rw-r--r--clang/test/Analysis/OSAtomic_mac.c4
-rw-r--r--clang/test/Analysis/UserNullabilityAnnotations.m2
-rw-r--r--clang/test/Analysis/_Bool-increment-decrement.c6
-rw-r--r--clang/test/Analysis/analyzer-display-progress.m2
-rw-r--r--clang/test/Analysis/analyzer-stats.c8
-rw-r--r--clang/test/Analysis/arc-zero-init.m8
-rw-r--r--clang/test/Analysis/array-struct-region.c22
-rw-r--r--clang/test/Analysis/array-struct-region.cpp12
-rw-r--r--clang/test/Analysis/array-struct.c32
-rw-r--r--clang/test/Analysis/assume-controlled-environment.c2
-rw-r--r--clang/test/Analysis/blocks-no-inline.c6
-rw-r--r--clang/test/Analysis/blocks-nrvo.c2
-rw-r--r--clang/test/Analysis/blocks.m34
-rw-r--r--clang/test/Analysis/bsd-string.c22
-rw-r--r--clang/test/Analysis/bstring.c82
-rw-r--r--clang/test/Analysis/bug_hash_test.m2
-rw-r--r--clang/test/Analysis/c11lock.c8
-rw-r--r--clang/test/Analysis/call-and-message.c2
-rw-r--r--clang/test/Analysis/call-and-message.m2
-rw-r--r--clang/test/Analysis/casts.c20
-rw-r--r--clang/test/Analysis/casts.m2
-rw-r--r--clang/test/Analysis/cert/env34-c.c34
-rw-r--r--clang/test/Analysis/cfg.c2
-rw-r--r--clang/test/Analysis/class-object-state-dump.m2
-rw-r--r--clang/test/Analysis/compound-literals.c2
-rw-r--r--clang/test/Analysis/concrete-address.c2
-rw-r--r--clang/test/Analysis/constant-folding.c2
-rw-r--r--clang/test/Analysis/constraint-assignor.c2
-rw-r--r--clang/test/Analysis/conversion-tracking-notes.c2
-rw-r--r--clang/test/Analysis/conversion.c36
-rw-r--r--clang/test/Analysis/copypaste/generic.c6
-rw-r--r--clang/test/Analysis/coverage.c2
-rw-r--r--clang/test/Analysis/crash-trace.c2
-rw-r--r--clang/test/Analysis/cstring-plist.c2
-rw-r--r--clang/test/Analysis/cstring-ranges.c2
-rw-r--r--clang/test/Analysis/cstring-syntax-weird2.c4
-rw-r--r--clang/test/Analysis/ctu-main.c12
-rw-r--r--clang/test/Analysis/dead-stores.c62
-rw-r--r--clang/test/Analysis/dead-stores.m12
-rw-r--r--clang/test/Analysis/debug-exprinspection-istainted.c4
-rw-r--r--clang/test/Analysis/default-analyze.m4
-rw-r--r--clang/test/Analysis/default-diagnostic-visitors.c2
-rw-r--r--clang/test/Analysis/designated-initializer-values.c6
-rw-r--r--clang/test/Analysis/designated-initializer.c6
-rw-r--r--clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif2
-rw-r--r--clang/test/Analysis/diagnostics/deref-track-symbolic-region.c2
-rw-r--r--clang/test/Analysis/diagnostics/false-positive-suppression.c4
-rw-r--r--clang/test/Analysis/diagnostics/find_last_store.c6
-rw-r--r--clang/test/Analysis/diagnostics/macro-null-return-suppression.cpp12
-rw-r--r--clang/test/Analysis/diagnostics/no-prune-paths.c6
-rw-r--r--clang/test/Analysis/diagnostics/no-store-func-path-notes.c32
-rw-r--r--clang/test/Analysis/diagnostics/no-store-func-path-notes.m4
-rw-r--r--clang/test/Analysis/diagnostics/plist-multi-file.c2
-rw-r--r--clang/test/Analysis/diagnostics/sarif-multi-diagnostic-test.c2
-rw-r--r--clang/test/Analysis/diagnostics/shortest-path-suppression.c4
-rw-r--r--clang/test/Analysis/diagnostics/text-diagnostics.c2
-rw-r--r--clang/test/Analysis/diagnostics/undef-value-callee.h2
-rw-r--r--clang/test/Analysis/diagnostics/undef-value-param.c2
-rw-r--r--clang/test/Analysis/disable-all-checks.c2
-rw-r--r--clang/test/Analysis/dispatch-once.m22
-rw-r--r--clang/test/Analysis/domtest.c10
-rw-r--r--clang/test/Analysis/double-ranges-bug.c2
-rw-r--r--clang/test/Analysis/dump_egraph.c4
-rw-r--r--clang/test/Analysis/elementtype.c2
-rw-r--r--clang/test/Analysis/enum-cast-out-of-range.c4
-rw-r--r--clang/test/Analysis/equality_tracking.c4
-rw-r--r--clang/test/Analysis/exercise-ps.c2
-rw-r--r--clang/test/Analysis/explain-svals.m2
-rw-r--r--clang/test/CodeGenCUDA/amdgpu-asan-printf.cu17
-rw-r--r--clang/test/CodeGenCUDA/amdgpu-asan.cu6
-rw-r--r--clang/test/Driver/cl-options.c2
-rw-r--r--clang/test/PCH/decl-in-prototype.c2
-rw-r--r--clang/test/PCH/designated-init.c.h4
-rw-r--r--clang/test/PCH/different-diagnostic-level.c2
-rw-r--r--clang/test/PCH/different-linker-version.c2
-rw-r--r--clang/test/PCH/emit-dependencies.c2
-rw-r--r--clang/test/PCH/enum.c2
-rw-r--r--clang/test/PCH/exprs.c2
-rw-r--r--clang/test/PCH/externally-retained.m2
-rw-r--r--clang/test/PCH/field-designator.c2
-rw-r--r--clang/test/PCH/format-strings.c2
-rw-r--r--clang/test/PCH/multiple-include-pch.c2
-rw-r--r--clang/test/PCH/nonvisible-external-defs.h2
-rw-r--r--clang/test/PCH/objc_container.h2
-rw-r--r--clang/test/PCH/objc_import.m2
-rw-r--r--clang/test/PCH/objc_literals.m8
-rw-r--r--clang/test/PCH/objc_methods.m2
-rw-r--r--clang/test/PCH/objc_property.m2
-rw-r--r--clang/test/PCH/pch-dir.c2
-rw-r--r--clang/test/PCH/pragma-diag.c2
-rw-r--r--clang/test/PCH/pragma-optimize.c2
-rw-r--r--clang/test/PCH/rdar8852495.c2
-rw-r--r--clang/test/PCH/struct.c4
-rw-r--r--clang/test/PCH/subscripting-literals.m2
-rw-r--r--clang/test/PCH/typo.m2
-rw-r--r--clang/test/PCH/undefined-internal.c8
-rw-r--r--clang/test/Preprocessor/extension-warning.c2
-rw-r--r--clang/test/Preprocessor/macro_raw_string.cpp2
-rw-r--r--clang/test/Preprocessor/pragma_assume_nonnull.c2
-rw-r--r--clang/test/Preprocessor/pragma_microsoft.c4
-rw-r--r--clang/test/Preprocessor/user_defined_system_framework.c2
-rw-r--r--clang/test/Profile/c-captured.c2
-rw-r--r--clang/test/Profile/c-collision.c2
-rw-r--r--clang/test/Profile/c-general.c22
-rw-r--r--clang/test/Profile/c-outdated-data.c4
-rw-r--r--clang/test/Profile/c-unreachable-after-switch.c2
-rw-r--r--clang/test/Profile/coverage-prefix-map.c2
-rw-r--r--clang/test/Profile/gcc-flag-compatibility-aix.c2
-rw-r--r--clang/test/Profile/gcc-flag-compatibility.c2
-rw-r--r--clang/test/Refactor/Extract/ExtractionSemicolonPolicy.m6
-rw-r--r--clang/test/Rewriter/blockstruct.m4
-rw-r--r--clang/test/Rewriter/crash.m4
-rw-r--r--clang/test/Rewriter/finally.m6
-rw-r--r--clang/test/Rewriter/objc-synchronized-1.m8
-rw-r--r--clang/test/Rewriter/rewrite-captured-nested-bvar.c4
-rw-r--r--clang/test/Rewriter/rewrite-foreach-1.m2
-rw-r--r--clang/test/Rewriter/rewrite-foreach-2.m6
-rw-r--r--clang/test/Rewriter/rewrite-foreach-3.m2
-rw-r--r--clang/test/Rewriter/rewrite-foreach-4.m2
-rw-r--r--clang/test/Rewriter/rewrite-foreach-7.m2
-rw-r--r--clang/test/Rewriter/rewrite-modern-synchronized.m10
-rw-r--r--clang/test/Rewriter/rewrite-modern-throw.m12
-rw-r--r--clang/test/Rewriter/rewrite-modern-try-catch-finally.m10
-rw-r--r--clang/test/Rewriter/rewrite-modern-try-finally.m10
-rw-r--r--clang/test/Rewriter/rewrite-try-catch.m4
-rw-r--r--clang/test/Rewriter/rewrite-weak-attr.m4
-rw-r--r--clang/test/Rewriter/undef-field-reference-1.m2
-rw-r--r--clang/test/Rewriter/weak_byref_objects.m4
-rw-r--r--clang/test/VFS/framework-import.m2
-rw-r--r--clang/test/VFS/implicit-include.c2
-rw-r--r--clang/test/VFS/include-mixed-real-and-virtual.c2
-rw-r--r--clang/test/VFS/include-real-from-virtual.c2
-rw-r--r--clang/test/VFS/include-virtual-from-real.c2
-rw-r--r--clang/test/VFS/include.c2
-rw-r--r--clang/test/VFS/module-import.m2
-rw-r--r--clang/test/VFS/relative-path.c2
-rw-r--r--clang/test/VFS/vfsroot-with-overlay.c2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/check-globals.c4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected2
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c.expected4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c.expected4
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c6
-rw-r--r--clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c.expected6
-rw-r--r--clang/test/utils/update_cc_test_checks/check-globals.test8
-rw-r--r--clang/unittests/Format/FormatTest.cpp735
-rw-r--r--clang/unittests/Format/TokenAnnotatorTest.cpp263
-rw-r--r--compiler-rt/cmake/Modules/AddCompilerRT.cmake1
-rw-r--r--compiler-rt/lib/asan/asan_linux.cpp27
-rw-r--r--compiler-rt/lib/asan/asan_poisoning.cpp4
-rw-r--r--compiler-rt/lib/asan/asan_report.cpp6
-rw-r--r--compiler-rt/lib/asan/asan_rtl.cpp5
-rw-r--r--compiler-rt/lib/hwasan/hwasan.cpp1
-rw-r--r--compiler-rt/lib/lsan/lsan.cpp5
-rw-r--r--compiler-rt/lib/memprof/memprof_rtl.cpp2
-rw-r--r--compiler-rt/lib/msan/msan.cpp8
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common.h3
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp11
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_file.cpp1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_file.h1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp25
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp1
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/sanitizer_win.cpp2
-rw-r--r--compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp14
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp10
-rw-r--r--compiler-rt/lib/tsan/rtl/tsan_rtl.cpp1
-rw-r--r--compiler-rt/lib/ubsan/ubsan_init.cpp7
-rw-r--r--flang/include/flang/Optimizer/Dialect/FIRType.h7
-rw-r--r--flang/lib/Evaluate/check-expression.cpp8
-rw-r--r--flang/lib/Evaluate/tools.cpp13
-rw-r--r--flang/lib/Optimizer/CodeGen/CodeGen.cpp25
-rw-r--r--flang/lib/Semantics/check-declarations.cpp1
-rw-r--r--flang/lib/Semantics/program-tree.cpp37
-rw-r--r--flang/lib/Semantics/program-tree.h11
-rw-r--r--flang/lib/Semantics/resolve-names.cpp12
-rw-r--r--flang/runtime/transformational.cpp17
-rw-r--r--flang/test/Fir/alloc.fir83
-rw-r--r--flang/test/Fir/convert-to-llvm.fir4
-rw-r--r--flang/test/Semantics/assign03.f903
-rw-r--r--flang/test/Semantics/associated.f9011
-rw-r--r--flang/test/Semantics/reshape.f904
-rw-r--r--flang/test/Semantics/resolve18.f9026
-rw-r--r--flang/test/Semantics/resolve69.f9012
-rw-r--r--libc/config/linux/aarch64/entrypoints.txt10
-rw-r--r--libc/config/linux/x86_64/entrypoints.txt29
-rw-r--r--libc/src/__support/CPP/CMakeLists.txt16
-rw-r--r--libc/src/stdlib/CMakeLists.txt14
-rw-r--r--libc/test/src/__support/CMakeLists.txt1
-rw-r--r--libc/test/src/__support/CPP/CMakeLists.txt (renamed from libc/test/utils/CPP/CMakeLists.txt)20
-rw-r--r--libc/test/src/__support/CPP/arrayref_test.cpp (renamed from libc/test/utils/CPP/arrayref_test.cpp)0
-rw-r--r--libc/test/src/__support/CPP/bitset_test.cpp (renamed from libc/test/utils/CPP/bitset_test.cpp)0
-rw-r--r--libc/test/src/__support/CPP/limits_test.cpp (renamed from libc/test/utils/CPP/limits_test.cpp)0
-rw-r--r--libc/test/src/__support/CPP/stringview_test.cpp (renamed from libc/test/utils/CPP/stringview_test.cpp)0
-rw-r--r--libc/test/src/__support/CPP/vector_test.cpp (renamed from libc/test/utils/CPP/vector_test.cpp)0
-rw-r--r--libc/test/utils/CMakeLists.txt1
-rw-r--r--libcxx/cmake/caches/Generic-asan.cmake2
-rw-r--r--libcxx/docs/Status/Cxx2bIssues.csv4
-rw-r--r--libcxx/include/CMakeLists.txt1
-rw-r--r--libcxx/include/__algorithm/in_fun_result.h4
-rw-r--r--libcxx/include/__algorithm/in_in_out_result.h6
-rw-r--r--libcxx/include/__algorithm/in_in_result.h4
-rw-r--r--libcxx/include/__algorithm/in_out_out_result.h6
-rw-r--r--libcxx/include/__algorithm/in_out_result.h4
-rw-r--r--libcxx/include/__algorithm/ranges_swap_ranges.h1
-rw-r--r--libcxx/include/__config22
-rw-r--r--libcxx/include/__format/format_arg.h1
-rw-r--r--libcxx/include/__format/format_context.h2
-rw-r--r--libcxx/include/__format/formatter_bool.h4
-rw-r--r--libcxx/include/__functional_base32
-rw-r--r--libcxx/include/__iterator/counted_iterator.h2
-rw-r--r--libcxx/include/__memory/shared_ptr.h1
-rw-r--r--libcxx/include/__memory/unique_ptr.h1
-rw-r--r--libcxx/include/__ranges/access.h1
-rw-r--r--libcxx/include/__ranges/copyable_box.h4
-rw-r--r--libcxx/include/__ranges/drop_view.h2
-rw-r--r--libcxx/include/__ranges/join_view.h4
-rw-r--r--libcxx/include/__ranges/reverse_view.h4
-rw-r--r--libcxx/include/__ranges/subrange.h6
-rw-r--r--libcxx/include/__ranges/take_view.h4
-rw-r--r--libcxx/include/__ranges/transform_view.h4
-rw-r--r--libcxx/include/__threading_support56
-rw-r--r--libcxx/include/bitset1
-rw-r--r--libcxx/include/experimental/__memory1
-rw-r--r--libcxx/include/iterator14
-rw-r--r--libcxx/include/memory9
-rw-r--r--libcxx/include/module.modulemap5
-rw-r--r--libcxx/include/optional6
-rw-r--r--libcxx/include/string11
-rw-r--r--libcxx/include/system_error1
-rw-r--r--libcxx/include/thread1
-rw-r--r--libcxx/include/tuple12
-rw-r--r--libcxx/include/typeindex12
-rw-r--r--libcxx/include/variant11
-rw-r--r--libcxx/include/vector11
-rw-r--r--libcxx/test/libcxx/utilities/format/format.string/format.string.std/std_format_spec_bool.pass.cpp72
-rw-r--r--libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp8
-rw-r--r--libcxx/test/std/atomics/types.pass.cpp10
-rw-r--r--libcxx/test/std/concepts/concepts.compare/concept.equalitycomparable/equality_comparable_with.compile.pass.cpp8
-rw-r--r--libcxx/test/std/concepts/concepts.object/movable.compile.pass.cpp6
-rw-r--r--libcxx/test/std/language.support/cmp/cmp.concept/three_way_comparable.compile.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bit.pow.two/bit_ceil.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bit.pow.two/bit_floor.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bit.pow.two/bit_width.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bit.pow.two/has_single_bit.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.count/countl_one.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.count/countl_zero.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.count/countr_one.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.count/countr_zero.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.count/popcount.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.rot/rotl.pass.cpp2
-rw-r--r--libcxx/test/std/numerics/bit/bitops.rot/rotr.pass.cpp2
-rw-r--r--libcxx/test/std/ranges/range.access/begin.pass.cpp81
-rw-r--r--libcxx/test/std/ranges/range.access/end.pass.cpp123
-rw-r--r--libcxx/test/std/ranges/range.factories/range.iota.view/iterator/member_typedefs.compile.pass.cpp3
-rw-r--r--libcxx/test/std/ranges/range.factories/range.iota.view/iterator/minus.pass.cpp3
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string.accessors/c_str.pass.cpp23
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string.accessors/data.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string.accessors/get_allocator.pass.cpp23
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/pointer.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_T_size_size.pass.cpp224
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer_size.pass.cpp113
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_size_size.pass.cpp465
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_view.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/string.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_compare/string_view.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size_size.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size_size.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size_size.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size_size.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_find/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_rfind/char_size.pass.cpp25
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size_size.pass.cpp31
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_size.pass.cpp33
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_view_size.pass.cpp29
-rw-r--r--libcxx/test/std/strings/basic.string/string.ops/string_substr/substr.pass.cpp15
-rw-r--r--libcxx/test/std/strings/string.view/trivially_copyable.compile.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.arguments/format.arg/ctor.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.context/format.context/advance_to.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.context/format.context/arg.pass.cpp3
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.context/format.context/ctor.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.context/format.context/locale.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.context/format.context/out.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/types.compile.pass.cpp8
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/advance_to.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/begin.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/ctor.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/end.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/types.compile.pass.cpp2
-rw-r--r--libcxx/test/std/utilities/format/format.functions/format_tests.h50
-rw-r--r--libcxx/test/std/utilities/utility/utility.intcmp/intcmp.fail.cpp8
-rw-r--r--libcxx/test/support/test_macros.h8
-rwxr-xr-xlibcxx/utils/generate_private_header_tests.py2
-rw-r--r--libcxx/utils/libcxx/test/params.py1
-rw-r--r--lld/MachO/Driver.cpp8
-rw-r--r--lld/MachO/LTO.cpp16
-rw-r--r--lld/MachO/MapFile.cpp23
-rw-r--r--lld/test/MachO/lto-internalize.ll43
-rw-r--r--lld/test/MachO/map-file.s61
-rw-r--r--lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp4
-rw-r--r--llvm/cmake/config-ix.cmake7
-rw-r--r--llvm/docs/LangRef.rst40
-rw-r--r--llvm/docs/SourceLevelDebugging.rst5
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolution.h15
-rw-r--r--llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h5
-rw-r--r--llvm/include/llvm/BinaryFormat/Swift.def2
-rw-r--r--llvm/include/llvm/CodeGen/ISDOpcodes.h14
-rw-r--r--llvm/include/llvm/CodeGen/MachineRegisterInfo.h19
-rw-r--r--llvm/include/llvm/CodeGen/TargetLowering.h4
-rw-r--r--llvm/include/llvm/IR/DIBuilder.h17
-rw-r--r--llvm/include/llvm/IR/InstrTypes.h18
-rw-r--r--llvm/include/llvm/IR/Intrinsics.td6
-rw-r--r--llvm/include/llvm/ProfileData/SampleProf.h2
-rw-r--r--llvm/include/llvm/Support/TargetOpcodes.def3
-rw-r--r--llvm/include/llvm/Target/GenericOpcodes.td6
-rw-r--r--llvm/include/llvm/Target/Target.td22
-rw-r--r--llvm/include/llvm/Target/TargetSelectionDAG.td4
-rw-r--r--llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h31
-rw-r--r--llvm/lib/Analysis/ScalarEvolution.cpp119
-rw-r--r--llvm/lib/CodeGen/CodeGenCommonISel.cpp6
-rw-r--r--llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp17
-rw-r--r--llvm/lib/CodeGen/MachineSink.cpp32
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp129
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp4
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp23
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp4
-rw-r--r--llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp4
-rw-r--r--llvm/lib/CodeGen/TargetLoweringBase.cpp6
-rw-r--r--llvm/lib/IR/DIBuilder.cpp16
-rw-r--r--llvm/lib/IR/Verifier.cpp21
-rw-r--r--llvm/lib/ProfileData/ProfileSummaryBuilder.cpp6
-rw-r--r--llvm/lib/ProfileData/SampleProf.cpp8
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.cpp124
-rw-r--r--llvm/lib/Target/AArch64/AArch64ISelLowering.h8
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.cpp42
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.h27
-rw-r--r--llvm/lib/Target/AArch64/AArch64InstrInfo.td13
-rw-r--r--llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp133
-rw-r--r--llvm/lib/Target/AArch64/AArch64StackTagging.cpp112
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPU.td6
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAttributes.def1
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp48
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUGISel.td3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp9
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp28
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp3
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp2
-rw-r--r--llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp11
-rw-r--r--llvm/lib/Target/AMDGPU/GCNSubtarget.h8
-rw-r--r--llvm/lib/Target/AMDGPU/SIISelLowering.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstrInfo.td8
-rw-r--r--llvm/lib/Target/AMDGPU/SIInstructions.td28
-rw-r--r--llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp4
-rw-r--r--llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h5
-rw-r--r--llvm/lib/Target/AMDGPU/SIModeRegister.cpp16
-rw-r--r--llvm/lib/Target/AMDGPU/SIRegisterInfo.td17
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp20
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h5
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.cpp144
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h38
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.cpp117
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h16
-rw-r--r--llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/Hexagon/HexagonPatternsHVX.td9
-rw-r--r--llvm/lib/Target/M68k/CMakeLists.txt1
-rw-r--r--llvm/lib/Target/M68k/M68kISelLowering.cpp105
-rw-r--r--llvm/lib/Target/M68k/M68kISelLowering.h2
-rw-r--r--llvm/lib/Target/M68k/M68kInstrArithmetic.td713
-rw-r--r--llvm/lib/Target/M68k/M68kInstrFormats.td103
-rw-r--r--llvm/lib/Target/M68k/M68kInstrInfo.td38
-rw-r--r--llvm/lib/Target/M68k/M68kRegisterInfo.h8
-rw-r--r--llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp50
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.cpp8
-rw-r--r--llvm/lib/Target/RISCV/RISCVFrameLowering.h2
-rw-r--r--llvm/lib/Target/RISCV/RISCVISelLowering.cpp42
-rw-r--r--llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp36
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfo.cpp2
-rw-r--r--llvm/lib/Target/RISCV/RISCVInstrInfoZb.td8
-rw-r--r--llvm/lib/Target/Sparc/SparcISelLowering.cpp9
-rw-r--r--llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp24
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp11
-rw-r--r--llvm/lib/Target/SystemZ/SystemZInstrInfo.td24
-rw-r--r--llvm/lib/Target/SystemZ/SystemZScheduleZ13.td4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZScheduleZ14.td4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZScheduleZ15.td4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZScheduleZ196.td4
-rw-r--r--llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td4
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.cpp483
-rw-r--r--llvm/lib/Target/X86/X86ISelLowering.h3
-rw-r--r--llvm/lib/Target/X86/X86InstrAVX512.td2
-rw-r--r--llvm/lib/Target/X86/X86InstrFragmentsSIMD.td1
-rw-r--r--llvm/lib/Target/X86/X86InstrSSE.td4
-rw-r--r--llvm/lib/Target/X86/X86IntrinsicsInfo.h12
-rw-r--r--llvm/lib/Transforms/Coroutines/CoroFrame.cpp25
-rw-r--r--llvm/lib/Transforms/IPO/AlwaysInliner.cpp18
-rw-r--r--llvm/lib/Transforms/IPO/OpenMPOpt.cpp1
-rw-r--r--llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp115
-rw-r--r--llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp16
-rw-r--r--llvm/lib/Transforms/Scalar/ConstraintElimination.cpp6
-rw-r--r--llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp10
-rw-r--r--llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp3
-rw-r--r--llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp51
-rw-r--r--llvm/test/Analysis/ScalarEvolution/logical-operations.ll162
-rw-r--r--llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll170
-rw-r--r--llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir4
-rw-r--r--llvm/test/CodeGen/AArch64/addsub.ll214
-rw-r--r--llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll15
-rw-r--r--llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll40
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.gfx.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir384
-rw-r--r--llvm/test/CodeGen/AMDGPU/accvgpr-spill-scc-clobber.mir4
-rw-r--r--llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll38
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll22
-rw-r--r--llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/fail.llvm.fptrunc.round.ll11
-rw-r--r--llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/flat-scratch.ll10
-rw-r--r--llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel-v3.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel.ll5
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v3.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v5.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent-v3.ll51
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent.ll48
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3-asan.ll3
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3.ll55
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present.ll53
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll303
-rw-r--r--llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll301
-rw-r--r--llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll52
-rw-r--r--llvm/test/CodeGen/AMDGPU/load-hi16.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/load-lo16.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/memory_clause.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/multi-dword-vgpr-spill.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/pei-build-av-spill.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/pei-build-spill-partial-agpr.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/pei-build-spill.mir8
-rw-r--r--llvm/test/CodeGen/AMDGPU/pei-scavenge-sgpr-gfx9.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/pei-scavenge-vgpr-spill.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll18
-rw-r--r--llvm/test/CodeGen/AMDGPU/scratch-simple.ll8
-rw-r--r--llvm/test/CodeGen/AMDGPU/sgpr-spill.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/spill-to-agpr-partial.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/stack-pointer-offset-relative-frameindex.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/store-hi16.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll4
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll6
-rw-r--r--llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll2
-rw-r--r--llvm/test/CodeGen/AMDGPU/vector-spill-restore-to-other-vector-type.mir2
-rw-r--r--llvm/test/CodeGen/AMDGPU/vgpr-spill-scc-clobber.mir4
-rw-r--r--llvm/test/CodeGen/M68k/Arith/bitwise.ll120
-rw-r--r--llvm/test/CodeGen/M68k/Control/cmp.ll2
-rw-r--r--llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll55
-rw-r--r--llvm/test/CodeGen/RISCV/double-br-fcmp.ll204
-rw-r--r--llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll8
-rw-r--r--llvm/test/CodeGen/RISCV/float-br-fcmp.ll204
-rw-r--r--llvm/test/CodeGen/RISCV/frame-info.ll46
-rw-r--r--llvm/test/CodeGen/RISCV/half-br-fcmp.ll204
-rw-r--r--llvm/test/CodeGen/RISCV/rv32zbb.ll22
-rw-r--r--llvm/test/CodeGen/RISCV/rv64zbb.ll65
-rw-r--r--llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir136
-rw-r--r--llvm/test/CodeGen/RISCV/shrinkwrap.ll41
-rw-r--r--llvm/test/CodeGen/RISCV/unroll-loop-cse.ll84
-rw-r--r--llvm/test/CodeGen/X86/avg.ll108
-rw-r--r--llvm/test/CodeGen/X86/avx512-insert-extract.ll1
-rw-r--r--llvm/test/CodeGen/X86/combine-pavg.ll34
-rw-r--r--llvm/test/CodeGen/X86/combine-rotates.ll4
-rw-r--r--llvm/test/CodeGen/X86/combine-udiv.ll5
-rw-r--r--llvm/test/CodeGen/X86/extractelement-load.ll151
-rw-r--r--llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll34
-rw-r--r--llvm/test/CodeGen/X86/min-legal-vector-width.ll5
-rw-r--r--llvm/test/CodeGen/X86/oddshuffles.ll3
-rw-r--r--llvm/test/CodeGen/X86/psubus.ll108
-rw-r--r--llvm/test/CodeGen/X86/vector-bo-select.ll203
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-128.ll148
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-256.ll233
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-512.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-128.ll85
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-256.ll11
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-512.ll10
-rw-r--r--llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-128.ll87
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-256.ll248
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-512.ll20
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-128.ll80
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-256.ll11
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-512.ll10
-rw-r--r--llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll8
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll65
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll1251
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll17
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll12
-rw-r--r--llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll35
-rw-r--r--llvm/test/CodeGen/X86/vector-rotate-128.ll85
-rw-r--r--llvm/test/CodeGen/X86/vector-rotate-256.ll6
-rw-r--r--llvm/test/CodeGen/X86/vector-rotate-512.ll10
-rw-r--r--llvm/test/DebugInfo/COFF/compiler-version-overflow.ll2
-rw-r--r--llvm/test/Instrumentation/HWAddressSanitizer/alloca-uninteresting.ll48
-rw-r--r--llvm/test/MC/Disassembler/M68k/arithmetic.txt3
-rw-r--r--llvm/test/TableGen/VarLenEncoder.td93
-rw-r--r--llvm/test/Transforms/ConstraintElimination/wrapping-math.ll202
-rw-r--r--llvm/test/Transforms/Coroutines/coro-debug-dbg.addr-swift.ll442
-rw-r--r--llvm/test/Transforms/Coroutines/coro-debug-dbg.addr.ll257
-rw-r--r--llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll33
-rw-r--r--llvm/test/Transforms/Inline/always-inline.ll54
-rw-r--r--llvm/test/Transforms/LoopDistribute/symbolic-stride.ll109
-rw-r--r--llvm/test/Transforms/LoopLoadElim/symbolic-stride.ll371
-rw-r--r--llvm/test/Transforms/OpenMP/barrier_removal.ll10
-rw-r--r--llvm/test/Verifier/llvm.fptrunc.round.ll13
-rw-r--r--llvm/test/tools/dsymutil/Inputs/reflection_metadata.yaml36
-rw-r--r--llvm/test/tools/dsymutil/X86/reflection-dump.test3
-rw-r--r--llvm/test/tools/llvm-profdata/cs-sample-nested-profile.test44
-rw-r--r--llvm/test/tools/llvm-profgen/cs-preinline.test2
-rw-r--r--llvm/unittests/IR/ConstantsTest.cpp9
-rw-r--r--llvm/unittests/IR/DebugInfoTest.cpp39
-rw-r--r--llvm/unittests/Support/CommandLineTest.cpp25
-rw-r--r--llvm/unittests/Support/Host.cpp63
-rw-r--r--llvm/utils/TableGen/CMakeLists.txt1
-rw-r--r--llvm/utils/TableGen/CodeEmitterGen.cpp233
-rw-r--r--llvm/utils/TableGen/CodeGenRegisters.cpp8
-rw-r--r--llvm/utils/TableGen/RegisterInfoEmitter.cpp44
-rw-r--r--llvm/utils/TableGen/VarLenCodeEmitterGen.cpp491
-rw-r--r--llvm/utils/TableGen/VarLenCodeEmitterGen.h25
-rw-r--r--llvm/utils/gn/secondary/libcxx/include/BUILD.gn2
-rw-r--r--llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/Utils/BUILD.gn1
-rw-r--r--llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn1
-rw-r--r--mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp11
-rw-r--r--mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp5
-rw-r--r--mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp24
-rw-r--r--mlir/lib/IR/AffineMap.cpp16
-rw-r--r--mlir/lib/IR/AffineMapDetail.h27
-rw-r--r--openmp/runtime/src/kmp_settings.cpp14
-rw-r--r--openmp/runtime/test/tasking/hidden_helper_task/single_helper_thread.c21
-rw-r--r--polly/test/ForwardOpTree/changed-kind.ll9
-rw-r--r--test/.gitattributes19
-rw-r--r--utils/bazel/README.md25
-rw-r--r--utils/bazel/llvm-project-overlay/clang/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/lld/BUILD.bazel1
-rw-r--r--utils/bazel/llvm-project-overlay/llvm/BUILD.bazel15
632 files changed, 12766 insertions, 6499 deletions
diff --git a/clang-tools-extra/clang-doc/Serialize.cpp b/clang-tools-extra/clang-doc/Serialize.cpp
index 29762b6b54b1..c567cda9b6e2 100644
--- a/clang-tools-extra/clang-doc/Serialize.cpp
+++ b/clang-tools-extra/clang-doc/Serialize.cpp
@@ -357,7 +357,7 @@ template <typename T>
static void
populateParentNamespaces(llvm::SmallVector<Reference, 4> &Namespaces,
const T *D, bool &IsInAnonymousNamespace) {
- const auto *DC = dyn_cast<DeclContext>(D);
+ const auto *DC = cast<DeclContext>(D);
while ((DC = DC->getParent())) {
if (const auto *N = dyn_cast<NamespaceDecl>(DC)) {
std::string Namespace;
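
The dyn_cast<> to cast<> change above, like the getAs<> to castAs<> changes in the hunks that follow, leans on LLVM's casting contract: dyn_cast<> (and getAs<> on types) returns null when the dynamic type does not match and must be null-checked, whereas cast<> (and castAs<>) asserts that the conversion is valid and never returns null. A minimal sketch of the difference, outside the patch, using the hypothetical helpers asExpr/asExprChecked:

#include "clang/AST/Expr.h"
#include "llvm/Support/Casting.h"

// dyn_cast<>: may return nullptr, so the caller has to check before using it.
static const clang::Expr *asExpr(const clang::Stmt *S) {
  if (const auto *E = llvm::dyn_cast<clang::Expr>(S))
    return E;
  return nullptr;
}

// cast<>: asserts the conversion holds and never returns nullptr; the right
// tool when the surrounding code (here, an AST matcher) already guarantees
// the dynamic type, as in the hunk above.
static const clang::Expr *asExprChecked(const clang::Stmt *S) {
  return llvm::cast<clang::Expr>(S);
}
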
diff --git a/clang-tools-extra/clang-tidy/bugprone/VirtualNearMissCheck.cpp b/clang-tools-extra/clang-tidy/bugprone/VirtualNearMissCheck.cpp
index 150b517811b6..f2aeac243095 100644
--- a/clang-tools-extra/clang-tidy/bugprone/VirtualNearMissCheck.cpp
+++ b/clang-tools-extra/clang-tidy/bugprone/VirtualNearMissCheck.cpp
@@ -40,11 +40,11 @@ static bool checkOverridingFunctionReturnType(const ASTContext *Context,
const CXXMethodDecl *BaseMD,
const CXXMethodDecl *DerivedMD) {
QualType BaseReturnTy = BaseMD->getType()
- ->getAs<FunctionType>()
+ ->castAs<FunctionType>()
->getReturnType()
.getCanonicalType();
QualType DerivedReturnTy = DerivedMD->getType()
- ->getAs<FunctionType>()
+ ->castAs<FunctionType>()
->getReturnType()
.getCanonicalType();
diff --git a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProBoundsConstantArrayIndexCheck.cpp b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProBoundsConstantArrayIndexCheck.cpp
index 59886ee4a3eb..d7bc56511dfb 100644
--- a/clang-tools-extra/clang-tidy/cppcoreguidelines/ProBoundsConstantArrayIndexCheck.cpp
+++ b/clang-tools-extra/clang-tidy/cppcoreguidelines/ProBoundsConstantArrayIndexCheck.cpp
@@ -71,7 +71,7 @@ void ProBoundsConstantArrayIndexCheck::check(
BaseRange = ArraySubscriptE->getBase()->getSourceRange();
else
BaseRange =
- dyn_cast<CXXOperatorCallExpr>(Matched)->getArg(0)->getSourceRange();
+ cast<CXXOperatorCallExpr>(Matched)->getArg(0)->getSourceRange();
SourceRange IndexRange = IndexExpr->getSourceRange();
auto Diag = diag(Matched->getExprLoc(),
diff --git a/clang-tools-extra/clang-tidy/llvmlibc/CalleeNamespaceCheck.cpp b/clang-tools-extra/clang-tidy/llvmlibc/CalleeNamespaceCheck.cpp
index 0636883c4819..91e9e026d0ad 100644
--- a/clang-tools-extra/clang-tidy/llvmlibc/CalleeNamespaceCheck.cpp
+++ b/clang-tools-extra/clang-tidy/llvmlibc/CalleeNamespaceCheck.cpp
@@ -22,7 +22,7 @@ namespace llvm_libc {
// Unit.
const DeclContext *getOutermostNamespace(const DeclContext *Decl) {
const DeclContext *Parent = Decl->getParent();
- if (Parent && Parent->isTranslationUnit())
+ if (Parent->isTranslationUnit())
return Decl;
return getOutermostNamespace(Parent);
}
diff --git a/clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.cpp b/clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.cpp
index c4e7f12e74ac..0043eb0dab9a 100644
--- a/clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.cpp
+++ b/clang-tools-extra/clang-tidy/modernize/UseNoexceptCheck.cpp
@@ -65,7 +65,7 @@ void UseNoexceptCheck::check(const MatchFinder::MatchResult &Result) {
} else if (const auto *ParmDecl =
Result.Nodes.getNodeAs<ParmVarDecl>("parmVarDecl")) {
FnTy = ParmDecl->getType()
- ->getAs<Type>()
+ ->castAs<Type>()
->getPointeeType()
->getAs<FunctionProtoType>();
diff --git a/clang-tools-extra/clang-tidy/performance/NoexceptMoveConstructorCheck.cpp b/clang-tools-extra/clang-tidy/performance/NoexceptMoveConstructorCheck.cpp
index f87f214945fa..782498bd69e9 100644
--- a/clang-tools-extra/clang-tidy/performance/NoexceptMoveConstructorCheck.cpp
+++ b/clang-tools-extra/clang-tidy/performance/NoexceptMoveConstructorCheck.cpp
@@ -38,7 +38,7 @@ void NoexceptMoveConstructorCheck::check(
return;
}
- const auto *ProtoType = Decl->getType()->getAs<FunctionProtoType>();
+ const auto *ProtoType = Decl->getType()->castAs<FunctionProtoType>();
if (isUnresolvedExceptionSpec(ProtoType->getExceptionSpecType()))
return;
diff --git a/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp b/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
index 548fed9a47c3..d399c957c7c7 100644
--- a/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/ContainerSizeEmptyCheck.cpp
@@ -218,23 +218,22 @@ void ContainerSizeEmptyCheck::check(const MatchFinder::MatchResult &Result) {
Hint = FixItHint::CreateReplacement(BinCmpRewritten->getSourceRange(),
ReplacementText);
} else if (BinaryOp) { // Determine the correct transformation.
+ const auto *LiteralLHS =
+ llvm::dyn_cast<IntegerLiteral>(BinaryOp->getLHS()->IgnoreImpCasts());
+ const auto *LiteralRHS =
+ llvm::dyn_cast<IntegerLiteral>(BinaryOp->getRHS()->IgnoreImpCasts());
+ const bool ContainerIsLHS = !LiteralLHS;
+
+ uint64_t Value = 0;
+ if (LiteralLHS)
+ Value = LiteralLHS->getValue().getLimitedValue();
+ else if (LiteralRHS)
+ Value = LiteralRHS->getValue().getLimitedValue();
+ else
+ return;
+
bool Negation = false;
- const bool ContainerIsLHS =
- !llvm::isa<IntegerLiteral>(BinaryOp->getLHS()->IgnoreImpCasts());
const auto OpCode = BinaryOp->getOpcode();
- uint64_t Value = 0;
- if (ContainerIsLHS) {
- if (const auto *Literal = llvm::dyn_cast<IntegerLiteral>(
- BinaryOp->getRHS()->IgnoreImpCasts()))
- Value = Literal->getValue().getLimitedValue();
- else
- return;
- } else {
- Value =
- llvm::dyn_cast<IntegerLiteral>(BinaryOp->getLHS()->IgnoreImpCasts())
- ->getValue()
- .getLimitedValue();
- }
// Constant that is not handled.
if (Value > 1)
diff --git a/clang-tools-extra/clang-tidy/readability/FunctionCognitiveComplexityCheck.cpp b/clang-tools-extra/clang-tidy/readability/FunctionCognitiveComplexityCheck.cpp
index c27733c04083..40542cca54a4 100644
--- a/clang-tools-extra/clang-tidy/readability/FunctionCognitiveComplexityCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/FunctionCognitiveComplexityCheck.cpp
@@ -444,8 +444,7 @@ public:
// A little beautification.
// For conditional operator "cond ? true : false" point at the "?"
// symbol.
- ConditionalOperator *COp = dyn_cast<ConditionalOperator>(Node);
- Location = COp->getQuestionLoc();
+ Location = cast<ConditionalOperator>(Node)->getQuestionLoc();
}
// If we have found any reasons, let's account it.
diff --git a/clang-tools-extra/clang-tidy/readability/RedundantControlFlowCheck.cpp b/clang-tools-extra/clang-tidy/readability/RedundantControlFlowCheck.cpp
index 6af77635aa2b..dd63de3f048e 100644
--- a/clang-tools-extra/clang-tidy/readability/RedundantControlFlowCheck.cpp
+++ b/clang-tools-extra/clang-tidy/readability/RedundantControlFlowCheck.cpp
@@ -79,7 +79,7 @@ void RedundantControlFlowCheck::issueDiagnostic(
SourceLocation Start;
if (Previous != Block->body_rend())
Start = Lexer::findLocationAfterToken(
- dyn_cast<Stmt>(*Previous)->getEndLoc(), tok::semi, SM, getLangOpts(),
+ cast<Stmt>(*Previous)->getEndLoc(), tok::semi, SM, getLangOpts(),
/*SkipTrailingWhitespaceAndNewLine=*/true);
if (!Start.isValid())
Start = StmtRange.getBegin();
diff --git a/clang/docs/ClangFormatStyleOptions.rst b/clang/docs/ClangFormatStyleOptions.rst
index d610c19faf2b..e89523d0e567 100644
--- a/clang/docs/ClangFormatStyleOptions.rst
+++ b/clang/docs/ClangFormatStyleOptions.rst
@@ -1988,17 +1988,33 @@ the configuration (without a prefix: ``Auto``).
-**BreakBeforeConceptDeclarations** (``Boolean``) :versionbadge:`clang-format 13`
- If ``true``, concept will be placed on a new line.
+**BreakBeforeConceptDeclarations** (``BreakBeforeConceptDeclarationsStyle``) :versionbadge:`clang-format 13`
+ The concept declaration style to use.
- .. code-block:: c++
+ Possible values:
+
+ * ``BBCDS_Never`` (in configuration: ``Never``)
+ Keep the template declaration line together with ``concept``.
+
+ .. code-block:: c++
+
+ template <typename T> concept C = ...;
+
+ * ``BBCDS_Allowed`` (in configuration: ``Allowed``)
+ Breaking between template declaration and ``concept`` is allowed. The
+ actual behavior depends on the content and line breaking rules and
+ penalities.
+
+ * ``BBCDS_Always`` (in configuration: ``Always``)
+ Always break before ``concept``, putting it in the line after the
+ template declaration.
+
+ .. code-block:: c++
+
+ template <typename T>
+ concept C = ...;
- true:
- template<typename T>
- concept ...
- false:
- template<typename T> concept ...
**BreakBeforeTernaryOperators** (``Boolean``) :versionbadge:`clang-format 3.7`
If ``true``, ternary operators will be placed after line breaks.
@@ -2690,8 +2706,9 @@ the configuration (without a prefix: ``Auto``).
-**IndentRequires** (``Boolean``) :versionbadge:`clang-format 13`
- Indent the requires clause in a template
+**IndentRequiresClause** (``Boolean``) :versionbadge:`clang-format 13`
+ Indent the requires clause in a template. This only applies when
+ ``RequiresClausePosition`` is ``OwnLine``, or ``WithFollowing``.
.. code-block:: c++
@@ -3474,6 +3491,92 @@ the configuration (without a prefix: ``Auto``).
}
}
+**RequiresClausePosition** (``RequiresClausePositionStyle``) :versionbadge:`clang-format 15`
+ The position of the ``requires`` clause.
+
+ Possible values:
+
+ * ``RCPS_OwnLine`` (in configuration: ``OwnLine``)
+ Always put the ``requires`` clause on its own line.
+
+ .. code-block:: c++
+
+ template <typename T>
+ requires C<T>
+ struct Foo {...
+
+ template <typename T>
+ requires C<T>
+ void bar(T t) {...
+
+ template <typename T>
+ void baz(T t)
+ requires C<T>
+ {...
+
+ * ``RCPS_WithPreceding`` (in configuration: ``WithPreceding``)
+ Try to put the clause together with the preceding part of a declaration.
+ For class templates: stick to the template declaration.
+ For function templates: stick to the template declaration.
+ For function declaration followed by a requires clause: stick to the
+ parameter list.
+
+ .. code-block:: c++
+
+ template <typename T> requires C<T>
+ struct Foo {...
+
+ template <typename T> requires C<T>
+ void bar(T t) {...
+
+ template <typename T>
+ void baz(T t) requires C<T>
+ {...
+
+ * ``RCPS_WithFollowing`` (in configuration: ``WithFollowing``)
+ Try to put the ``requires`` clause together with the class or function
+ declaration.
+
+ .. code-block:: c++
+
+ template <typename T>
+ requires C<T> struct Foo {...
+
+ template <typename T>
+ requires C<T> void bar(T t) {...
+
+ template <typename T>
+ void baz(T t)
+ requires C<T> {...
+
+ * ``RCPS_SingleLine`` (in configuration: ``SingleLine``)
+ Try to put everything in the same line if possible. Otherwise normal
+ line breaking rules take over.
+
+ .. code-block:: c++
+
+ // Fitting:
+ template <typename T> requires C<T> struct Foo {...
+
+ template <typename T> requires C<T> void bar(T t) {...
+
+ template <typename T> void bar(T t) requires C<T> {...
+
+ // Not fitting, one possible example:
+ template <typename LongName>
+ requires C<LongName>
+ struct Foo {...
+
+ template <typename LongName>
+ requires C<LongName>
+ void bar(LongName ln) {
+
+ template <typename LongName>
+ void bar(LongName ln)
+ requires C<LongName> {
+
+
+
**SeparateDefinitionBlocks** (``SeparateDefinitionStyle``) :versionbadge:`clang-format 14`
Specifies the use of empty lines to separate definition blocks, including
classes, structs, enums, and functions.
diff --git a/clang/docs/ReleaseNotes.rst b/clang/docs/ReleaseNotes.rst
index ba8e028eb6ae..4a6395941267 100644
--- a/clang/docs/ReleaseNotes.rst
+++ b/clang/docs/ReleaseNotes.rst
@@ -167,6 +167,14 @@ AST Matchers
clang-format
------------
+- **Important change**: Renamed ``IndentRequires`` to ``IndentRequiresClause``
+ and changed the default for all styles from ``false`` to ``true``.
+
+- Reworked and improved handling of concepts and requires. Added the
+ ``RequiresClausePosition`` option as part of that.
+
+- Changed ``BreakBeforeConceptDeclarations`` from ``Boolean`` to an enum.
+
libclang
--------
diff --git a/clang/include/clang/Driver/Options.td b/clang/include/clang/Driver/Options.td
index 6be3aa117220..cd0d56cecaca 100644
--- a/clang/include/clang/Driver/Options.td
+++ b/clang/include/clang/Driver/Options.td
@@ -1251,9 +1251,10 @@ def fcs_profile_generate_EQ : Joined<["-"], "fcs-profile-generate=">,
Group<f_Group>, Flags<[CoreOption]>, MetaVarName<"<directory>">,
HelpText<"Generate instrumented code to collect context sensitive execution counts into <directory>/default.profraw (overridden by LLVM_PROFILE_FILE env var)">;
def fprofile_use : Flag<["-"], "fprofile-use">, Group<f_Group>,
- Alias<fprofile_instr_use>;
+ Flags<[CoreOption]>, Alias<fprofile_instr_use>;
def fprofile_use_EQ : Joined<["-"], "fprofile-use=">,
- Group<f_Group>, Flags<[NoXarchOption]>, MetaVarName<"<pathname>">,
+ Group<f_Group>, Flags<[NoXarchOption, CoreOption]>,
+ MetaVarName<"<pathname>">,
HelpText<"Use instrumentation data for profile-guided optimization. If pathname is a directory, it reads from <pathname>/default.profdata. Otherwise, it reads from file <pathname>.">;
def fno_profile_instr_generate : Flag<["-"], "fno-profile-instr-generate">,
Group<f_Group>, Flags<[CoreOption]>,
diff --git a/clang/include/clang/Format/Format.h b/clang/include/clang/Format/Format.h
index d4e859f4decc..9d6df403230d 100644
--- a/clang/include/clang/Format/Format.h
+++ b/clang/include/clang/Format/Format.h
@@ -1770,17 +1770,29 @@ struct FormatStyle {
/// \version 3.8
BraceWrappingFlags BraceWrapping;
- /// If ``true``, concept will be placed on a new line.
- /// \code
- /// true:
- /// template<typename T>
- /// concept ...
- ///
- /// false:
- /// template<typename T> concept ...
- /// \endcode
+ /// Different ways to break before concept declarations.
+ enum BreakBeforeConceptDeclarationsStyle {
+ /// Keep the template declaration line together with ``concept``.
+ /// \code
+ /// template <typename T> concept C = ...;
+ /// \endcode
+ BBCDS_Never,
+ /// Breaking between template declaration and ``concept`` is allowed. The
+ /// actual behavior depends on the content and line breaking rules and
+ /// penalities.
+ BBCDS_Allowed,
+ /// Always break before ``concept``, putting it in the line after the
+ /// template declaration.
+ /// \code
+ /// template <typename T>
+ /// concept C = ...;
+ /// \endcode
+ BBCDS_Always,
+ };
+
+ /// The concept declaration style to use.
/// \version 13
- bool BreakBeforeConceptDeclarations;
+ BreakBeforeConceptDeclarationsStyle BreakBeforeConceptDeclarations;
/// If ``true``, ternary operators will be placed after line breaks.
/// \code
@@ -2509,7 +2521,8 @@ struct FormatStyle {
/// \version 12
IndentExternBlockStyle IndentExternBlock;
- /// Indent the requires clause in a template
+ /// Indent the requires clause in a template. This only applies when
+ /// ``RequiresClausePosition`` is ``OwnLine``, or ``WithFollowing``.
/// \code
/// true:
/// template <typename It>
@@ -2526,7 +2539,7 @@ struct FormatStyle {
/// }
/// \endcode
/// \version 13
- bool IndentRequires;
+ bool IndentRequiresClause;
/// The number of columns to use for indentation.
/// \code
@@ -3116,6 +3129,87 @@ struct FormatStyle {
/// \version 14
bool RemoveBracesLLVM;
+ /// \brief The possible positions for the requires clause. The
+ /// ``IndentRequires`` option is only used if the ``requires`` is put on the
+ /// start of a line.
+ enum RequiresClausePositionStyle {
+ /// Always put the ``requires`` clause on its own line.
+ /// \code
+ /// template <typename T>
+ /// requires C<T>
+ /// struct Foo {...
+ ///
+ /// template <typename T>
+ /// requires C<T>
+ /// void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t)
+ /// requires C<T>
+ /// {...
+ /// \endcode
+ RCPS_OwnLine,
+ /// Try to put the clause together with the preceding part of a declaration.
+ /// For class templates: stick to the template declaration.
+ /// For function templates: stick to the template declaration.
+ /// For function declaration followed by a requires clause: stick to the
+ /// parameter list.
+ /// \code
+ /// template <typename T> requires C<T>
+ /// struct Foo {...
+ ///
+ /// template <typename T> requires C<T>
+ /// void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t) requires C<T>
+ /// {...
+ /// \endcode
+ RCPS_WithPreceding,
+ /// Try to put the ``requires`` clause together with the class or function
+ /// declaration.
+ /// \code
+ /// template <typename T>
+ /// requires C<T> struct Foo {...
+ ///
+ /// template <typename T>
+ /// requires C<T> void bar(T t) {...
+ ///
+ /// template <typename T>
+ /// void baz(T t)
+ /// requires C<T> {...
+ /// \endcode
+ RCPS_WithFollowing,
+ /// Try to put everything in the same line if possible. Otherwise normal
+ /// line breaking rules take over.
+ /// \code
+ /// // Fitting:
+ /// template <typename T> requires C<T> struct Foo {...
+ ///
+ /// template <typename T> requires C<T> void bar(T t) {...
+ ///
+ /// template <typename T> void bar(T t) requires C<T> {...
+ ///
+ /// // Not fitting, one possible example:
+ /// template <typename LongName>
+ /// requires C<LongName>
+ /// struct Foo {...
+ ///
+ /// template <typename LongName>
+ /// requires C<LongName>
+ /// void bar(LongName ln) {
+ ///
+ /// template <typename LongName>
+ /// void bar(LongName ln)
+ /// requires C<LongName> {
+ /// \endcode
+ RCPS_SingleLine,
+ };
+
+ /// \brief The position of the ``requires`` clause.
+ /// \version 15
+ RequiresClausePositionStyle RequiresClausePosition;
+
/// \brief The style if definition blocks should be separated.
enum SeparateDefinitionStyle {
/// Leave definition blocks as they are.
@@ -3889,8 +3983,8 @@ struct FormatStyle {
IndentGotoLabels == R.IndentGotoLabels &&
IndentPPDirectives == R.IndentPPDirectives &&
IndentExternBlock == R.IndentExternBlock &&
- IndentRequires == R.IndentRequires && IndentWidth == R.IndentWidth &&
- Language == R.Language &&
+ IndentRequiresClause == R.IndentRequiresClause &&
+ IndentWidth == R.IndentWidth && Language == R.Language &&
IndentWrappedFunctionNames == R.IndentWrappedFunctionNames &&
JavaImportGroups == R.JavaImportGroups &&
JavaScriptQuotes == R.JavaScriptQuotes &&
@@ -3926,6 +4020,7 @@ struct FormatStyle {
RawStringFormats == R.RawStringFormats &&
ReferenceAlignment == R.ReferenceAlignment &&
RemoveBracesLLVM == R.RemoveBracesLLVM &&
+ RequiresClausePosition == R.RequiresClausePosition &&
SeparateDefinitionBlocks == R.SeparateDefinitionBlocks &&
ShortNamespaceLines == R.ShortNamespaceLines &&
SortIncludes == R.SortIncludes &&
diff --git a/clang/lib/CodeGen/CGClass.cpp b/clang/lib/CodeGen/CGClass.cpp
index 76b90924750c..b7d6139f720c 100644
--- a/clang/lib/CodeGen/CGClass.cpp
+++ b/clang/lib/CodeGen/CGClass.cpp
@@ -1649,22 +1649,22 @@ namespace {
}
};
- static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
- CharUnits::QuantityType PoisonSize) {
- CodeGenFunction::SanitizerScope SanScope(&CGF);
- // Pass in void pointer and size of region as arguments to runtime
- // function
- llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
- llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
-
- llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
-
- llvm::FunctionType *FnType =
- llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
- llvm::FunctionCallee Fn =
- CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
- CGF.EmitNounwindRuntimeCall(Fn, Args);
- }
+ static void EmitSanitizerDtorCallback(CodeGenFunction &CGF, llvm::Value *Ptr,
+ CharUnits::QuantityType PoisonSize) {
+ CodeGenFunction::SanitizerScope SanScope(&CGF);
+ // Pass in void pointer and size of region as arguments to runtime
+ // function
+ llvm::Value *Args[] = {CGF.Builder.CreateBitCast(Ptr, CGF.VoidPtrTy),
+ llvm::ConstantInt::get(CGF.SizeTy, PoisonSize)};
+
+ llvm::Type *ArgTypes[] = {CGF.VoidPtrTy, CGF.SizeTy};
+
+ llvm::FunctionType *FnType =
+ llvm::FunctionType::get(CGF.VoidTy, ArgTypes, false);
+ llvm::FunctionCallee Fn =
+ CGF.CGM.CreateRuntimeFunction(FnType, "__sanitizer_dtor_callback");
+ CGF.EmitNounwindRuntimeCall(Fn, Args);
+ }
class SanitizeDtorMembers final : public EHScopeStack::Cleanup {
const CXXDestructorDecl *Dtor;
diff --git a/clang/lib/CodeGen/CGExpr.cpp b/clang/lib/CodeGen/CGExpr.cpp
index 4565f4343aa3..403c60b31816 100644
--- a/clang/lib/CodeGen/CGExpr.cpp
+++ b/clang/lib/CodeGen/CGExpr.cpp
@@ -407,7 +407,7 @@ static Address createReferenceTemporary(CodeGenFunction &CGF,
GV->getValueType()->getPointerTo(
CGF.getContext().getTargetAddressSpace(LangAS::Default)));
// FIXME: Should we put the new global into a COMDAT?
- return Address(C, alignment);
+ return Address(C, GV->getValueType(), alignment);
}
return CGF.CreateMemTemp(Ty, "ref.tmp", Alloca);
}
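For context, an inference on my part rather than something stated in the patch: the CodeGen hunks in this file (and the CodeGenModule one further down) all replace the two-argument Address constructor (pointer, alignment) with the three-argument form that also records the pointee ("element") type, presumably so the element type no longer has to be derived from the pointer type once pointers become opaque. The pattern, using only names from the hunk above:

    // before:  return Address(C, alignment);
    // after:   return Address(C, GV->getValueType(), alignment);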
@@ -441,10 +441,10 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
ownership != Qualifiers::OCL_ExplicitNone) {
Address Object = createReferenceTemporary(*this, M, E);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(Object.getPointer())) {
- Object = Address(llvm::ConstantExpr::getBitCast(Var,
- ConvertTypeForMem(E->getType())
- ->getPointerTo(Object.getAddressSpace())),
- Object.getAlignment());
+ llvm::Type *Ty = ConvertTypeForMem(E->getType());
+ Object = Address(llvm::ConstantExpr::getBitCast(
+ Var, Ty->getPointerTo(Object.getAddressSpace())),
+ Ty, Object.getAlignment());
// createReferenceTemporary will promote the temporary to a global with a
// constant initializer if it can. It can only do this to a value of
@@ -499,9 +499,11 @@ EmitMaterializeTemporaryExpr(const MaterializeTemporaryExpr *M) {
Address Object = createReferenceTemporary(*this, M, E, &Alloca);
if (auto *Var = dyn_cast<llvm::GlobalVariable>(
Object.getPointer()->stripPointerCasts())) {
+ llvm::Type *TemporaryType = ConvertTypeForMem(E->getType());
Object = Address(llvm::ConstantExpr::getBitCast(
cast<llvm::Constant>(Object.getPointer()),
- ConvertTypeForMem(E->getType())->getPointerTo()),
+ TemporaryType->getPointerTo()),
+ TemporaryType,
Object.getAlignment());
// If the temporary is a global and has a constant initializer or is a
// constant temporary that we promoted to a global, we may have already
@@ -1208,9 +1210,10 @@ RValue CodeGenFunction::EmitUnsupportedRValue(const Expr *E,
LValue CodeGenFunction::EmitUnsupportedLValue(const Expr *E,
const char *Name) {
ErrorUnsupported(E, Name);
- llvm::Type *Ty = llvm::PointerType::getUnqual(ConvertType(E->getType()));
- return MakeAddrLValue(Address(llvm::UndefValue::get(Ty), CharUnits::One()),
- E->getType());
+ llvm::Type *ElTy = ConvertType(E->getType());
+ llvm::Type *Ty = llvm::PointerType::getUnqual(ElTy);
+ return MakeAddrLValue(
+ Address(llvm::UndefValue::get(Ty), ElTy, CharUnits::One()), E->getType());
}
bool CodeGenFunction::IsWrappedCXXThis(const Expr *Obj) {
@@ -2741,8 +2744,10 @@ LValue CodeGenFunction::EmitDeclRefLValue(const DeclRefExpr *E) {
LValue CapLVal =
EmitCapturedFieldLValue(*this, CapturedStmtInfo->lookup(VD),
CapturedStmtInfo->getContextValue());
+ Address LValueAddress = CapLVal.getAddress(*this);
CapLVal = MakeAddrLValue(
- Address(CapLVal.getPointer(*this), getContext().getDeclAlign(VD)),
+ Address(LValueAddress.getPointer(), LValueAddress.getElementType(),
+ getContext().getDeclAlign(VD)),
CapLVal.getType(), LValueBaseInfo(AlignmentSource::Decl),
CapLVal.getTBAAInfo());
// Mark lvalue as nontemporal if the variable is marked as nontemporal
@@ -3431,7 +3436,8 @@ void CodeGenFunction::EmitCfiCheckFail() {
CfiCheckFailDataTy,
Builder.CreatePointerCast(Data, CfiCheckFailDataTy->getPointerTo(0)), 0,
0);
- Address CheckKindAddr(V, getIntAlign());
+
+ Address CheckKindAddr(V, Int8Ty, getIntAlign());
llvm::Value *CheckKind = Builder.CreateLoad(CheckKindAddr);
llvm::Value *AllVtables = llvm::MetadataAsValue::get(
@@ -3817,7 +3823,7 @@ LValue CodeGenFunction::EmitArraySubscriptExpr(const ArraySubscriptExpr *E,
llvm::Value *EltPtr =
emitArraySubscriptGEP(*this, Addr.getElementType(), Addr.getPointer(),
ScaledIdx, false, SignedIndices, E->getExprLoc());
- Addr = Address(EltPtr, EltAlign);
+ Addr = Address(EltPtr, Addr.getElementType(), EltAlign);
// Cast back.
Addr = Builder.CreateBitCast(Addr, OrigBaseTy);
@@ -3917,7 +3923,8 @@ static Address emitOMPArraySectionBase(CodeGenFunction &CGF, const Expr *Base,
CGF.CGM.getNaturalTypeAlignment(ElTy, &TypeBaseInfo, &TypeTBAAInfo);
BaseInfo.mergeForCast(TypeBaseInfo);
TBAAInfo = CGF.CGM.mergeTBAAInfoForCast(TBAAInfo, TypeTBAAInfo);
- return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)), Align);
+ return Address(CGF.Builder.CreateLoad(BaseLVal.getAddress(CGF)),
+ CGF.ConvertTypeForMem(ElTy), Align);
}
return CGF.EmitPointerWithAlignment(Base, &BaseInfo, &TBAAInfo);
}
@@ -4374,7 +4381,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
// fields may leak the real address of dynamic object, which could result
// in miscompilation when leaked pointer would be compared.
auto *stripped = Builder.CreateStripInvariantGroup(addr.getPointer());
- addr = Address(stripped, addr.getAlignment());
+ addr = Address(stripped, addr.getElementType(), addr.getAlignment());
}
}
@@ -4395,7 +4402,7 @@ LValue CodeGenFunction::EmitLValueForField(LValue base,
addr = Address(
Builder.CreatePreserveUnionAccessIndex(
addr.getPointer(), getDebugInfoFIndex(rec, field->getFieldIndex()), DbgInfo),
- addr.getAlignment());
+ addr.getElementType(), addr.getAlignment());
}
if (FieldType->isReferenceType())
@@ -4779,7 +4786,8 @@ LValue CodeGenFunction::EmitCastLValue(const CastExpr *E) {
*this, LV.getPointer(*this),
E->getSubExpr()->getType().getAddressSpace(),
E->getType().getAddressSpace(), ConvertType(DestTy));
- return MakeAddrLValue(Address(V, LV.getAddress(*this).getAlignment()),
+ return MakeAddrLValue(Address(V, ConvertTypeForMem(E->getType()),
+ LV.getAddress(*this).getAlignment()),
E->getType(), LV.getBaseInfo(), LV.getTBAAInfo());
}
case CK_ObjCObjectLValueCast: {
@@ -5333,7 +5341,8 @@ RValue CodeGenFunction::EmitCall(QualType CalleeType, const CGCallee &OrigCallee
llvm::Value *Handle = Callee.getFunctionPointer();
auto *Cast =
Builder.CreateBitCast(Handle, Handle->getType()->getPointerTo());
- auto *Stub = Builder.CreateLoad(Address(Cast, CGM.getPointerAlign()));
+ auto *Stub = Builder.CreateLoad(
+ Address(Cast, Handle->getType(), CGM.getPointerAlign()));
Callee.setFunctionPointer(Stub);
}
llvm::CallBase *CallOrInvoke = nullptr;
diff --git a/clang/lib/CodeGen/CodeGenModule.cpp b/clang/lib/CodeGen/CodeGenModule.cpp
index 8a7345a9f494..772059a436d1 100644
--- a/clang/lib/CodeGen/CodeGenModule.cpp
+++ b/clang/lib/CodeGen/CodeGenModule.cpp
@@ -566,9 +566,6 @@ void CodeGenModule::Release() {
"__amdgpu_device_library_preserve_asan_functions_ptr", nullptr,
llvm::GlobalVariable::NotThreadLocal);
addCompilerUsedGlobal(Var);
- if (!getModule().getModuleFlag("amdgpu_hostcall")) {
- getModule().addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
- }
}
// Emit amdgpu_code_object_version module flag, which is code object version
// times 100.
@@ -5673,9 +5670,11 @@ ConstantAddress CodeGenModule::GetAddrOfGlobalTemporary(
getModule(), Type, false, llvm::GlobalVariable::InternalLinkage,
nullptr);
}
- return ConstantAddress(
- InsertResult.first->second,
- InsertResult.first->second->getType()->getPointerElementType(), Align);
+ return ConstantAddress(InsertResult.first->second,
+ llvm::cast<llvm::GlobalVariable>(
+ InsertResult.first->second->stripPointerCasts())
+ ->getValueType(),
+ Align);
}
// FIXME: If an externally-visible declaration extends multiple temporaries,
diff --git a/clang/lib/Format/ContinuationIndenter.cpp b/clang/lib/Format/ContinuationIndenter.cpp
index 93d409118128..42c3d2e4326b 100644
--- a/clang/lib/Format/ContinuationIndenter.cpp
+++ b/clang/lib/Format/ContinuationIndenter.cpp
@@ -478,11 +478,32 @@ bool ContinuationIndenter::mustBreak(const LineState &State) {
return true;
if (Current.NestingLevel == 0 && !Current.isTrailingComment()) {
- // Always break after "template <...>" and leading annotations. This is only
- // for cases where the entire line does not fit on a single line as a
+ // Always break after "template <...>"(*) and leading annotations. This is
+ // only for cases where the entire line does not fit on a single line as a
// different LineFormatter would be used otherwise.
- if (Previous.ClosesTemplateDeclaration)
+ // *: Except when another option interferes with that, like concepts.
+ if (Previous.ClosesTemplateDeclaration) {
+ if (Current.is(tok::kw_concept)) {
+ switch (Style.BreakBeforeConceptDeclarations) {
+ case FormatStyle::BBCDS_Allowed:
+ break;
+ case FormatStyle::BBCDS_Always:
+ return true;
+ case FormatStyle::BBCDS_Never:
+ return false;
+ }
+ }
+ if (Current.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return false;
+ default:
+ return true;
+ }
+ }
return Style.AlwaysBreakTemplateDeclarations != FormatStyle::BTDS_No;
+ }
if (Previous.is(TT_FunctionAnnotationRParen) &&
State.Line->Type != LT_PreprocessorDirective)
return true;
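To make the new switch concrete, an illustration of my own based on the option documentation earlier in this patch: when the whole declaration does not fit on one line, BBCDS_Always forces the break before 'concept', BBCDS_Never suppresses it, and BBCDS_Allowed falls back to the AlwaysBreakTemplateDeclarations setting checked below.

    // BBCDS_Always (or BBCDS_Allowed with AlwaysBreakTemplateDeclarations: Yes):
    template <typename T>
    concept Small = sizeof(T) <= 8;

    // BBCDS_Never keeps the keyword attached when it fits:
    template <typename T> concept Small = sizeof(T) <= 8;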
@@ -669,6 +690,7 @@ void ContinuationIndenter::addTokenOnCurrentLine(LineState &State, bool DryRun,
if (Style.AlignAfterOpenBracket != FormatStyle::BAS_DontAlign &&
!State.Stack.back().IsCSharpGenericTypeConstraint &&
Previous.opensScope() && Previous.isNot(TT_ObjCMethodExpr) &&
+ Previous.isNot(TT_RequiresClause) &&
(Current.isNot(TT_LineComment) || Previous.is(BK_BracedInit))) {
State.Stack.back().Indent = State.Column + Spaces;
State.Stack.back().IsAligned = true;
@@ -880,7 +902,8 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
Previous.is(TT_BinaryOperator))
State.Stack.back().BreakBeforeParameter = false;
if (PreviousNonComment &&
- PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) &&
+ (PreviousNonComment->isOneOf(TT_TemplateCloser, TT_JavaAnnotation) ||
+ PreviousNonComment->ClosesRequiresClause) &&
Current.NestingLevel == 0)
State.Stack.back().BreakBeforeParameter = false;
if (NextNonComment->is(tok::question) ||
@@ -927,13 +950,19 @@ unsigned ContinuationIndenter::addTokenOnNewLine(LineState &State,
State.Stack[State.Stack.size() - 2].NestedBlockInlined) ||
(Style.Language == FormatStyle::LK_ObjC && Current.is(tok::r_brace) &&
State.Stack.size() > 1 && !Style.ObjCBreakBeforeNestedBlockParam);
+ // Do not force parameter break for statements with requires expressions.
+ NestedBlockSpecialCase =
+ NestedBlockSpecialCase ||
+ (Current.MatchingParen &&
+ Current.MatchingParen->is(TT_RequiresExpressionLBrace));
if (!NestedBlockSpecialCase)
for (unsigned i = 0, e = State.Stack.size() - 1; i != e; ++i)
State.Stack[i].BreakBeforeParameter = true;
if (PreviousNonComment &&
!PreviousNonComment->isOneOf(tok::comma, tok::colon, tok::semi) &&
- (PreviousNonComment->isNot(TT_TemplateCloser) ||
+ ((PreviousNonComment->isNot(TT_TemplateCloser) &&
+ !PreviousNonComment->ClosesRequiresClause) ||
Current.NestingLevel != 0) &&
!PreviousNonComment->isOneOf(
TT_BinaryOperator, TT_FunctionAnnotationRParen, TT_JavaAnnotation,
@@ -1096,8 +1125,20 @@ unsigned ContinuationIndenter::getNewLineColumn(const LineState &State) {
}
if (Previous.is(tok::comma) && State.Stack.back().VariablePos != 0)
return State.Stack.back().VariablePos;
+ if (Current.is(TT_RequiresClause)) {
+ if (Style.IndentRequiresClause)
+ return State.Stack.back().Indent + Style.IndentWidth;
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return State.Stack.back().Indent;
+ default:
+ break;
+ }
+ }
if ((PreviousNonComment &&
(PreviousNonComment->ClosesTemplateDeclaration ||
+ PreviousNonComment->ClosesRequiresClause ||
PreviousNonComment->isOneOf(
TT_AttributeParen, TT_AttributeSquare, TT_FunctionAnnotationRParen,
TT_JavaAnnotation, TT_LeadingJavaAnnotation))) ||
@@ -1288,6 +1329,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
State.Column + Current.ColumnWidth + 1;
if (Current.isOneOf(TT_LambdaLSquare, TT_LambdaArrow))
State.Stack.back().LastSpace = State.Column;
+ if (Current.is(TT_RequiresExpression))
+ State.Stack.back().NestedBlockIndent = State.Column;
// Insert scopes created by fake parenthesis.
const FormatToken *Previous = Current.getPreviousNonComment();
@@ -1298,8 +1341,8 @@ unsigned ContinuationIndenter::moveStateToNextToken(LineState &State,
// foo();
// bar();
// }, a, b, c);
- if (Current.isNot(tok::comment) && Previous &&
- Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
+ if (Current.isNot(tok::comment) && !Current.ClosesRequiresClause &&
+ Previous && Previous->isOneOf(tok::l_brace, TT_ArrayInitializerLSquare) &&
!Previous->is(TT_DictLiteral) && State.Stack.size() > 1 &&
!State.Stack.back().HasMultipleNestedBlocks) {
if (State.Stack[State.Stack.size() - 2].NestedBlockInlined && Newline)
@@ -1359,14 +1402,15 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
const FormatToken *Previous = Current.getPreviousNonComment();
// Don't add extra indentation for the first fake parenthesis after
- // 'return', assignments or opening <({[. The indentation for these cases
- // is special cased.
+ // 'return', assignments, opening <({[, or requires clauses. The indentation
+ // for these cases is special cased.
bool SkipFirstExtraIndent =
- (Previous && (Previous->opensScope() ||
- Previous->isOneOf(tok::semi, tok::kw_return) ||
- (Previous->getPrecedence() == prec::Assignment &&
- Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
- Previous->is(TT_ObjCMethodExpr)));
+ Previous &&
+ (Previous->opensScope() ||
+ Previous->isOneOf(tok::semi, tok::kw_return, TT_RequiresClause) ||
+ (Previous->getPrecedence() == prec::Assignment &&
+ Style.AlignOperands != FormatStyle::OAS_DontAlign) ||
+ Previous->is(TT_ObjCMethodExpr));
for (const auto &PrecedenceLevel : llvm::reverse(Current.FakeLParens)) {
ParenState NewParenState = State.Stack.back();
NewParenState.Tok = nullptr;
@@ -1399,7 +1443,7 @@ void ContinuationIndenter::moveStatePastFakeLParens(LineState &State,
if (Previous &&
(Previous->getPrecedence() == prec::Assignment ||
- Previous->is(tok::kw_return) ||
+ Previous->isOneOf(tok::kw_return, TT_RequiresClause) ||
(PrecedenceLevel == prec::Conditional && Previous->is(tok::question) &&
Previous->is(TT_ConditionalExpr))) &&
!Newline) {
@@ -1457,6 +1501,12 @@ void ContinuationIndenter::moveStatePastFakeRParens(LineState &State) {
State.Stack.pop_back();
State.Stack.back().VariablePos = VariablePos;
}
+
+ if (State.NextToken->ClosesRequiresClause && Style.IndentRequiresClause) {
+ // Remove the indentation of the requires clauses (which is not in Indent,
+ // but in LastSpace).
+ State.Stack.back().LastSpace -= Style.IndentWidth;
+ }
}
void ContinuationIndenter::moveStatePastScopeOpener(LineState &State,
diff --git a/clang/lib/Format/Format.cpp b/clang/lib/Format/Format.cpp
index 551d8cfe7ec1..8aa12867c35f 100644
--- a/clang/lib/Format/Format.cpp
+++ b/clang/lib/Format/Format.cpp
@@ -258,6 +258,21 @@ struct ScalarEnumerationTraits<
};
template <>
+struct ScalarEnumerationTraits<
+ FormatStyle::BreakBeforeConceptDeclarationsStyle> {
+ static void
+ enumeration(IO &IO, FormatStyle::BreakBeforeConceptDeclarationsStyle &Value) {
+ IO.enumCase(Value, "Never", FormatStyle::BBCDS_Never);
+ IO.enumCase(Value, "Allowed", FormatStyle::BBCDS_Allowed);
+ IO.enumCase(Value, "Always", FormatStyle::BBCDS_Always);
+
+ // For backward compatibility.
+ IO.enumCase(Value, "true", FormatStyle::BBCDS_Always);
+ IO.enumCase(Value, "false", FormatStyle::BBCDS_Allowed);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::BreakConstructorInitializersStyle> {
static void
enumeration(IO &IO, FormatStyle::BreakConstructorInitializersStyle &Value) {
@@ -464,6 +479,17 @@ struct ScalarEnumerationTraits<FormatStyle::ReferenceAlignmentStyle> {
};
template <>
+struct ScalarEnumerationTraits<FormatStyle::RequiresClausePositionStyle> {
+ static void enumeration(IO &IO,
+ FormatStyle::RequiresClausePositionStyle &Value) {
+ IO.enumCase(Value, "OwnLine", FormatStyle::RCPS_OwnLine);
+ IO.enumCase(Value, "WithPreceding", FormatStyle::RCPS_WithPreceding);
+ IO.enumCase(Value, "WithFollowing", FormatStyle::RCPS_WithFollowing);
+ IO.enumCase(Value, "SingleLine", FormatStyle::RCPS_SingleLine);
+ }
+};
+
+template <>
struct ScalarEnumerationTraits<FormatStyle::SpaceBeforeParensStyle> {
static void enumeration(IO &IO, FormatStyle::SpaceBeforeParensStyle &Value) {
IO.enumCase(Value, "Never", FormatStyle::SBPO_Never);
@@ -565,6 +591,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("DerivePointerBinding", Style.DerivePointerAlignment);
IO.mapOptional("IndentFunctionDeclarationAfterType",
Style.IndentWrappedFunctionNames);
+ IO.mapOptional("IndentRequires", Style.IndentRequiresClause);
IO.mapOptional("PointerBindsToType", Style.PointerAlignment);
IO.mapOptional("SpaceAfterControlStatementKeyword",
Style.SpaceBeforeParens);
@@ -737,7 +764,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("IndentGotoLabels", Style.IndentGotoLabels);
IO.mapOptional("IndentPPDirectives", Style.IndentPPDirectives);
IO.mapOptional("IndentExternBlock", Style.IndentExternBlock);
- IO.mapOptional("IndentRequires", Style.IndentRequires);
+ IO.mapOptional("IndentRequiresClause", Style.IndentRequiresClause);
IO.mapOptional("IndentWidth", Style.IndentWidth);
IO.mapOptional("IndentWrappedFunctionNames",
Style.IndentWrappedFunctionNames);
@@ -782,6 +809,7 @@ template <> struct MappingTraits<FormatStyle> {
IO.mapOptional("ReferenceAlignment", Style.ReferenceAlignment);
IO.mapOptional("ReflowComments", Style.ReflowComments);
IO.mapOptional("RemoveBracesLLVM", Style.RemoveBracesLLVM);
+ IO.mapOptional("RequiresClausePosition", Style.RequiresClausePosition);
IO.mapOptional("SeparateDefinitionBlocks", Style.SeparateDefinitionBlocks);
IO.mapOptional("ShortNamespaceLines", Style.ShortNamespaceLines);
IO.mapOptional("SortIncludes", Style.SortIncludes);
@@ -1130,7 +1158,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.BinPackArguments = true;
LLVMStyle.BinPackParameters = true;
LLVMStyle.BreakBeforeBinaryOperators = FormatStyle::BOS_None;
- LLVMStyle.BreakBeforeConceptDeclarations = true;
+ LLVMStyle.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Always;
LLVMStyle.BreakBeforeTernaryOperators = true;
LLVMStyle.BreakBeforeBraces = FormatStyle::BS_Attach;
LLVMStyle.BraceWrapping = {/*AfterCaseLabel=*/false,
@@ -1188,7 +1216,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.IndentCaseBlocks = false;
LLVMStyle.IndentGotoLabels = true;
LLVMStyle.IndentPPDirectives = FormatStyle::PPDIS_None;
- LLVMStyle.IndentRequires = false;
+ LLVMStyle.IndentRequiresClause = true;
LLVMStyle.IndentWrappedFunctionNames = false;
LLVMStyle.IndentWidth = 2;
LLVMStyle.PPIndentWidth = -1;
@@ -1207,6 +1235,7 @@ FormatStyle getLLVMStyle(FormatStyle::LanguageKind Language) {
LLVMStyle.ObjCSpaceBeforeProtocolList = true;
LLVMStyle.PointerAlignment = FormatStyle::PAS_Right;
LLVMStyle.ReferenceAlignment = FormatStyle::RAS_Pointer;
+ LLVMStyle.RequiresClausePosition = FormatStyle::RCPS_OwnLine;
LLVMStyle.SeparateDefinitionBlocks = FormatStyle::SDS_Leave;
LLVMStyle.ShortNamespaceLines = 1;
LLVMStyle.SpacesBeforeTrailingComments = 1;
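Under these LLVM defaults (RCPS_OwnLine together with the IndentRequiresClause default set above), a constrained declaration that has to wrap should come out roughly as follows; this is my illustration, extrapolated from the option docs in Format.h:

    template <typename T>
      requires C<T>
    void foo(T t);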
@@ -3048,6 +3077,15 @@ reformat(const FormatStyle &Style, StringRef Code,
FormatStyle Expanded = Style;
expandPresetsBraceWrapping(Expanded);
expandPresetsSpaceBeforeParens(Expanded);
+ switch (Expanded.RequiresClausePosition) {
+ case FormatStyle::RCPS_SingleLine:
+ case FormatStyle::RCPS_WithPreceding:
+ Expanded.IndentRequiresClause = false;
+ break;
+ default:
+ break;
+ }
+
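A brief rationale sketch of mine, not stated in the patch: with RCPS_WithPreceding or RCPS_SingleLine the requires clause never starts its own line, so there is nothing for IndentRequiresClause to indent, and clearing it here keeps the two options from contradicting each other. For example, WithPreceding yields:

    template <typename T> requires C<T>
    void foo(T t);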
if (Expanded.DisableFormat)
return {tooling::Replacements(), 0};
if (isLikelyXml(Code))
diff --git a/clang/lib/Format/FormatToken.h b/clang/lib/Format/FormatToken.h
index 4c03f436dde3..fee365ecc8f9 100644
--- a/clang/lib/Format/FormatToken.h
+++ b/clang/lib/Format/FormatToken.h
@@ -35,12 +35,13 @@ namespace format {
TYPE(BinaryOperator) \
TYPE(BitFieldColon) \
TYPE(BlockComment) \
+ TYPE(BracedListLBrace) \
TYPE(CastRParen) \
+ TYPE(CompoundRequirementLBrace) \
TYPE(ConditionalExpr) \
TYPE(ConflictAlternative) \
TYPE(ConflictEnd) \
TYPE(ConflictStart) \
- TYPE(ConstraintJunctions) \
TYPE(CtorInitializerColon) \
TYPE(CtorInitializerComma) \
TYPE(DesignatedInitializerLSquare) \
@@ -98,6 +99,11 @@ namespace format {
TYPE(RangeBasedForLoopColon) \
TYPE(RecordLBrace) \
TYPE(RegexLiteral) \
+ TYPE(RequiresClause) \
+ TYPE(RequiresClauseInARequiresExpression) \
+ TYPE(RequiresExpression) \
+ TYPE(RequiresExpressionLBrace) \
+ TYPE(RequiresExpressionLParen) \
TYPE(SelectorName) \
TYPE(StartOfName) \
TYPE(StatementAttributeLikeMacro) \
@@ -245,8 +251,9 @@ struct FormatToken {
CanBreakBefore(false), ClosesTemplateDeclaration(false),
StartsBinaryExpression(false), EndsBinaryExpression(false),
PartOfMultiVariableDeclStmt(false), ContinuesLineCommentSection(false),
- Finalized(false), BlockKind(BK_Unknown), Decision(FD_Unformatted),
- PackingKind(PPK_Inconclusive), Type(TT_Unknown) {}
+ Finalized(false), ClosesRequiresClause(false), BlockKind(BK_Unknown),
+ Decision(FD_Unformatted), PackingKind(PPK_Inconclusive),
+ Type(TT_Unknown) {}
/// The \c Token.
Token Tok;
@@ -312,6 +319,9 @@ struct FormatToken {
/// changes.
unsigned Finalized : 1;
+ /// \c true if this is the last token within a requires clause.
+ unsigned ClosesRequiresClause : 1;
+
private:
/// Contains the kind of block if this token is a brace.
unsigned BlockKind : 2;
diff --git a/clang/lib/Format/TokenAnnotator.cpp b/clang/lib/Format/TokenAnnotator.cpp
index 70f92c26fa8d..dabecbf9c74a 100644
--- a/clang/lib/Format/TokenAnnotator.cpp
+++ b/clang/lib/Format/TokenAnnotator.cpp
@@ -1019,7 +1019,7 @@ private:
return false;
if (Line.MustBeDeclaration && Contexts.size() == 1 &&
!Contexts.back().IsExpression && !Line.startsWith(TT_ObjCProperty) &&
- !Tok->is(TT_TypeDeclarationParen) &&
+ !Tok->isOneOf(TT_TypeDeclarationParen, TT_RequiresExpressionLParen) &&
(!Tok->Previous || !Tok->Previous->isOneOf(tok::kw___attribute,
TT_LeadingJavaAnnotation)))
Line.MightBeFunctionDecl = true;
@@ -1152,6 +1152,10 @@ private:
parseCSharpGenericTypeConstraint();
}
break;
+ case tok::arrow:
+ if (Tok->Previous && Tok->Previous->is(tok::kw_noexcept))
+ Tok->setType(TT_TrailingReturnArrow);
+ break;
default:
break;
}
@@ -1412,9 +1416,12 @@ private:
TT_ImplicitStringLiteral, TT_InlineASMBrace, TT_FatArrow,
TT_LambdaArrow, TT_NamespaceMacro, TT_OverloadedOperator,
TT_RegexLiteral, TT_TemplateString, TT_ObjCStringLiteral,
- TT_UntouchableMacroFunc, TT_ConstraintJunctions,
- TT_StatementAttributeLikeMacro, TT_FunctionLikeOrFreestandingMacro,
- TT_RecordLBrace))
+ TT_UntouchableMacroFunc, TT_StatementAttributeLikeMacro,
+ TT_FunctionLikeOrFreestandingMacro, TT_RecordLBrace,
+ TT_RequiresClause, TT_RequiresClauseInARequiresExpression,
+ TT_RequiresExpression, TT_RequiresExpressionLParen,
+ TT_RequiresExpressionLBrace, TT_BinaryOperator,
+ TT_CompoundRequirementLBrace, TT_BracedListLBrace))
CurrentToken->setType(TT_Unknown);
CurrentToken->Role.reset();
CurrentToken->MatchingParen = nullptr;
@@ -1609,7 +1616,8 @@ private:
PriorLeadingIdentifier = PriorLeadingIdentifier->Previous;
return (PriorLeadingIdentifier &&
- PriorLeadingIdentifier->is(TT_TemplateCloser) &&
+ (PriorLeadingIdentifier->is(TT_TemplateCloser) ||
+ PriorLeadingIdentifier->ClosesRequiresClause) &&
LeadingIdentifier->TokenText == Current.Next->TokenText);
}
}
@@ -1826,6 +1834,9 @@ private:
if (!PreviousNotConst)
return false;
+ if (PreviousNotConst->ClosesRequiresClause)
+ return false;
+
bool IsPPKeyword = PreviousNotConst->is(tok::identifier) &&
PreviousNotConst->Previous &&
PreviousNotConst->Previous->is(tok::hash);
@@ -2164,7 +2175,7 @@ class ExpressionParser {
public:
ExpressionParser(const FormatStyle &Style, const AdditionalKeywords &Keywords,
AnnotatedLine &Line)
- : Style(Style), Keywords(Keywords), Current(Line.First) {}
+ : Style(Style), Keywords(Keywords), Line(Line), Current(Line.First) {}
/// Parse expressions with the given operator precedence.
void parse(int Precedence = 0) {
@@ -2219,7 +2230,11 @@ public:
break;
// Consume scopes: (), [], <> and {}
- if (Current->opensScope()) {
+ // In addition, we treat requires clauses as scopes, so that the
+ // constraints inside them are indented correctly.
+ if (Current->opensScope() ||
+ Current->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression)) {
// In fragment of a JavaScript template string can look like '}..${' and
// thus close a scope and open a new one at the same time.
while (Current && (!Current->closesScope() || Current->opensScope())) {
@@ -2241,12 +2256,26 @@ public:
}
if (LatestOperator && (Current || Precedence > 0)) {
- // LatestOperator->LastOperator = true;
+ // A requires clause does not necessarily end in a semicolon or a brace;
+ // it can run straight into a struct/class or function declaration. We need
+ // to intervene so that the fake right paren is inserted correctly.
+ auto End =
+ (Start->Previous &&
+ Start->Previous->isOneOf(TT_RequiresClause,
+ TT_RequiresClauseInARequiresExpression))
+ ? [this](){
+ auto Ret = Current ? Current : Line.Last;
+ while (!Ret->ClosesRequiresClause && Ret->Previous)
+ Ret = Ret->Previous;
+ return Ret;
+ }()
+ : nullptr;
+
if (Precedence == PrecedenceArrowAndPeriod) {
// Call expressions don't have a binary operator precedence.
- addFakeParenthesis(Start, prec::Unknown);
+ addFakeParenthesis(Start, prec::Unknown, End);
} else {
- addFakeParenthesis(Start, prec::Level(Precedence));
+ addFakeParenthesis(Start, prec::Level(Precedence), End);
}
}
}
@@ -2295,17 +2324,17 @@ private:
return -1;
}
- void addFakeParenthesis(FormatToken *Start, prec::Level Precedence) {
+ void addFakeParenthesis(FormatToken *Start, prec::Level Precedence,
+ FormatToken *End = nullptr) {
Start->FakeLParens.push_back(Precedence);
if (Precedence > prec::Unknown)
Start->StartsBinaryExpression = true;
- if (Current) {
- FormatToken *Previous = Current->Previous;
- while (Previous->is(tok::comment) && Previous->Previous)
- Previous = Previous->Previous;
- ++Previous->FakeRParens;
+ if (!End && Current)
+ End = Current->getPreviousNonComment();
+ if (End) {
+ ++End->FakeRParens;
if (Precedence > prec::Unknown)
- Previous->EndsBinaryExpression = true;
+ End->EndsBinaryExpression = true;
}
}
@@ -2350,6 +2379,7 @@ private:
const FormatStyle &Style;
const AdditionalKeywords &Keywords;
+ const AnnotatedLine &Line;
FormatToken *Current;
};
@@ -2920,6 +2950,8 @@ unsigned TokenAnnotator::splitPenalty(const AnnotatedLine &Line,
}
if (Left.ClosesTemplateDeclaration)
return Style.PenaltyBreakTemplateDeclaration;
+ if (Left.ClosesRequiresClause)
+ return 0;
if (Left.is(TT_ConditionalExpr))
return prec::Conditional;
prec::Level Level = Left.getPrecedence();
@@ -2987,9 +3019,6 @@ bool TokenAnnotator::spaceRequiredBetween(const AnnotatedLine &Line,
if (Left.isOneOf(tok::kw_co_await, tok::kw_co_yield, tok::kw_co_return) &&
Right.isNot(tok::semi))
return true;
- // requires clause Concept1<T> && Concept2<T>
- if (Left.is(TT_ConstraintJunctions) && Right.is(tok::identifier))
- return true;
if (Left.is(tok::l_paren) || Right.is(tok::r_paren))
return (Right.is(TT_CastRParen) ||
@@ -3892,6 +3921,15 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
if (Right.is(tok::lessless) && Right.Next && Left.is(tok::string_literal) &&
Right.Next->is(tok::string_literal))
return true;
+ if (Right.is(TT_RequiresClause)) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithFollowing:
+ return true;
+ default:
+ break;
+ }
+ }
// Can break after template<> declaration
if (Left.ClosesTemplateDeclaration && Left.MatchingParen &&
Left.MatchingParen->NestingLevel == 0) {
@@ -3899,9 +3937,18 @@ bool TokenAnnotator::mustBreakBefore(const AnnotatedLine &Line,
// template<typename T>
// concept ...
if (Right.is(tok::kw_concept))
- return Style.BreakBeforeConceptDeclarations;
+ return Style.BreakBeforeConceptDeclarations == FormatStyle::BBCDS_Always;
return (Style.AlwaysBreakTemplateDeclarations == FormatStyle::BTDS_Yes);
}
+ if (Left.ClosesRequiresClause) {
+ switch (Style.RequiresClausePosition) {
+ case FormatStyle::RCPS_OwnLine:
+ case FormatStyle::RCPS_WithPreceding:
+ return true;
+ default:
+ break;
+ }
+ }
if (Style.PackConstructorInitializers == FormatStyle::PCIS_Never) {
if (Style.BreakConstructorInitializers == FormatStyle::BCIS_BeforeColon &&
(Left.is(TT_CtorInitializerComma) || Right.is(TT_CtorInitializerColon)))
@@ -4296,8 +4343,14 @@ bool TokenAnnotator::canBreakBefore(const AnnotatedLine &Line,
return Left.isNot(tok::period); // FIXME: Properly parse ObjC calls.
if (Left.is(tok::r_paren) && Line.Type == LT_ObjCProperty)
return true;
+ if (Right.is(tok::kw_concept))
+ return Style.BreakBeforeConceptDeclarations != FormatStyle::BBCDS_Never;
+ if (Right.is(TT_RequiresClause))
+ return true;
if (Left.ClosesTemplateDeclaration || Left.is(TT_FunctionAnnotationRParen))
return true;
+ if (Left.ClosesRequiresClause)
+ return true;
if (Right.isOneOf(TT_RangeBasedForLoopColon, TT_OverloadedOperatorLParen,
TT_OverloadedOperator))
return false;
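For orientation, my summary rather than the patch's wording: mustBreakBefore forces a line break while canBreakBefore only permits one, so with BBCDS_Allowed a long concept declaration may still wrap before 'concept' when the line is too long, and breaks before and after a requires clause are now always available to the line breaker:

    template <typename VeryLongTemplateParameterName>
    concept ReallyLongConceptName =
        sizeof(VeryLongTemplateParameterName) <= 8;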
diff --git a/clang/lib/Format/UnwrappedLineParser.cpp b/clang/lib/Format/UnwrappedLineParser.cpp
index 0686aeb253ad..e2d5197988be 100644
--- a/clang/lib/Format/UnwrappedLineParser.cpp
+++ b/clang/lib/Format/UnwrappedLineParser.cpp
@@ -369,7 +369,7 @@ void UnwrappedLineParser::parseFile() {
if (Style.Language == FormatStyle::LK_TextProto)
parseBracedList();
else
- parseLevel(/*HasOpeningBrace=*/false);
+ parseLevel(/*HasOpeningBrace=*/false, /*CanContainBracedList=*/true);
// Make sure to format the remaining tokens.
//
// LK_TextProto is special since its top-level is parsed as the body of a
@@ -436,10 +436,20 @@ bool UnwrappedLineParser::precededByCommentOrPPDirective() const {
return Previous && Previous->is(tok::comment) &&
(Previous->IsMultiline || Previous->NewlinesBefore > 0);
}
-
-// Returns true if a simple block, or false otherwise. (A simple block has a
-// single statement.)
-bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
+/// \brief Parses a level, that is ???.
+/// \param HasOpeningBrace If that level is started by an opening brace.
+/// \param CanContainBracedList If the content can contain (at any level) a
+/// braced list.
+/// \param NextLBracesType The type to set for left braces found in this level.
+/// \returns true if a simple block, or false otherwise. (A simple block has a
+/// single statement.)
+bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace,
+ bool CanContainBracedList,
+ IfStmtKind *IfKind,
+ TokenType NextLBracesType) {
+ auto NextLevelLBracesType = NextLBracesType == TT_CompoundRequirementLBrace
+ ? TT_BracedListLBrace
+ : TT_Unknown;
const bool IsPrecededByCommentOrPPDirective =
!Style.RemoveBracesLLVM || precededByCommentOrPPDirective();
unsigned StatementCount = 0;
@@ -451,17 +461,36 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
else if (FormatTok->getType() == TT_MacroBlockEnd)
kind = tok::r_brace;
+ auto ParseDefault = [this, HasOpeningBrace, IfKind, NextLevelLBracesType,
+ &StatementCount] {
+ parseStructuralElement(IfKind, /*IsTopLevel=*/!HasOpeningBrace,
+ /*NextLBracesType=*/NextLevelLBracesType);
+ ++StatementCount;
+ assert(StatementCount > 0 && "StatementCount overflow!");
+ };
+
switch (kind) {
case tok::comment:
nextToken();
addUnwrappedLine();
break;
case tok::l_brace:
- // FIXME: Add parameter whether this can happen - if this happens, we must
- // be in a non-declaration context.
- if (!FormatTok->is(TT_MacroBlockBegin) && tryToParseBracedList())
+ if (NextLBracesType != TT_Unknown)
+ FormatTok->setType(NextLBracesType);
+ else if (FormatTok->Previous &&
+ FormatTok->Previous->ClosesRequiresClause) {
+ // We need the 'default' case here to correctly parse a function
+ // l_brace.
+ ParseDefault();
continue;
- parseBlock();
+ }
+ if (CanContainBracedList && !FormatTok->is(TT_MacroBlockBegin) &&
+ tryToParseBracedList())
+ continue;
+ parseBlock(/*MustBeDeclaration=*/false, /*AddLevels=*/1u,
+ /*MunchSemi=*/true, /*UnindentWhitesmithBraces=*/false,
+ CanContainBracedList,
+ /*NextLBracesType=*/NextLBracesType);
++StatementCount;
assert(StatementCount > 0 && "StatementCount overflow!");
addUnwrappedLine();
@@ -517,9 +546,7 @@ bool UnwrappedLineParser::parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind) {
}
LLVM_FALLTHROUGH;
default:
- parseStructuralElement(IfKind, !HasOpeningBrace);
- ++StatementCount;
- assert(StatementCount > 0 && "StatementCount overflow!");
+ ParseDefault();
break;
}
} while (!eof());
@@ -594,27 +621,46 @@ void UnwrappedLineParser::calculateBraceTypes(bool ExpectClassBody) {
bool NextIsObjCMethod = NextTok->isOneOf(tok::plus, tok::minus) &&
NextTok->OriginalColumn == 0;
+ // Try to detect a braced list. Note that regardless of how we mark inner
+ // braces here, we will overwrite the BlockKind later if we parse a
+ // braced list (where all blocks inside are by default braced lists),
+ // or when we explicitly detect blocks (for example while parsing
+ // lambdas).
+
+ // If we already marked the opening brace as a braced list, the closing
+ // must also be part of it.
+ ProbablyBracedList = LBraceStack.back()->is(TT_BracedListLBrace);
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isJavaScript() &&
+ NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
+ Keywords.kw_as));
+ ProbablyBracedList = ProbablyBracedList ||
+ (Style.isCpp() && NextTok->is(tok::l_paren));
+
// If there is a comma, semicolon or right paren after the closing
- // brace, we assume this is a braced initializer list. Note that
- // regardless how we mark inner braces here, we will overwrite the
- // BlockKind later if we parse a braced list (where all blocks
- // inside are by default braced lists), or when we explicitly detect
- // blocks (for example while parsing lambdas).
+ // brace, we assume this is a braced initializer list.
// FIXME: Some of these do not apply to JS, e.g. "} {" can never be a
// braced list in JS.
ProbablyBracedList =
- (Style.isJavaScript() &&
- NextTok->isOneOf(Keywords.kw_of, Keywords.kw_in,
- Keywords.kw_as)) ||
- (Style.isCpp() && NextTok->is(tok::l_paren)) ||
+ ProbablyBracedList ||
NextTok->isOneOf(tok::comma, tok::period, tok::colon,
tok::r_paren, tok::r_square, tok::l_brace,
- tok::ellipsis) ||
+ tok::ellipsis);
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->is(tok::identifier) &&
- !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace)) ||
- (NextTok->is(tok::semi) &&
- (!ExpectClassBody || LBraceStack.size() != 1)) ||
+ !PrevTok->isOneOf(tok::semi, tok::r_brace, tok::l_brace));
+
+ ProbablyBracedList = ProbablyBracedList ||
+ (NextTok->is(tok::semi) &&
+ (!ExpectClassBody || LBraceStack.size() != 1));
+
+ ProbablyBracedList =
+ ProbablyBracedList ||
(NextTok->isBinaryOperator() && !NextIsObjCMethod);
+
if (!Style.isCSharp() && NextTok->is(tok::l_square)) {
// We can have an array subscript after a braced init
// list, but C++11 attributes are expected after blocks.
@@ -680,8 +726,9 @@ size_t UnwrappedLineParser::computePPHash() const {
UnwrappedLineParser::IfStmtKind
UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
- bool MunchSemi,
- bool UnindentWhitesmithsBraces) {
+ bool MunchSemi, bool UnindentWhitesmithsBraces,
+ bool CanContainBracedList,
+ TokenType NextLBracesType) {
assert(FormatTok->isOneOf(tok::l_brace, TT_MacroBlockBegin) &&
"'{' or macro block token expected");
FormatToken *Tok = FormatTok;
@@ -721,7 +768,8 @@ UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
Line->Level += AddLevels;
IfStmtKind IfKind = IfStmtKind::NotIf;
- const bool SimpleBlock = parseLevel(/*HasOpeningBrace=*/true, &IfKind);
+ const bool SimpleBlock = parseLevel(
+ /*HasOpeningBrace=*/true, CanContainBracedList, &IfKind, NextLBracesType);
if (eof())
return IfKind;
@@ -751,8 +799,13 @@ UnwrappedLineParser::parseBlock(bool MustBeDeclaration, unsigned AddLevels,
if (MacroBlock && FormatTok->is(tok::l_paren))
parseParens();
+ if (FormatTok->is(tok::kw_noexcept)) {
+ // A noexcept in a requires expression.
+ nextToken();
+ }
+
if (FormatTok->is(tok::arrow)) {
- // Following the } we can find a trailing return type arrow
+ // Following the } or noexcept we can find a trailing return type arrow
// as part of an implicit conversion constraint.
nextToken();
parseStructuralElement();
@@ -826,7 +879,8 @@ static bool ShouldBreakBeforeBrace(const FormatStyle &Style,
return false;
}
-void UnwrappedLineParser::parseChildBlock() {
+void UnwrappedLineParser::parseChildBlock(
+ bool CanContainBracedList, clang::format::TokenType NextLBracesType) {
FormatTok->setBlockKind(BK_Block);
nextToken();
{
@@ -836,7 +890,8 @@ void UnwrappedLineParser::parseChildBlock() {
ScopedDeclarationState DeclarationState(*Line, DeclarationScopeStack,
/*MustBeDeclaration=*/false);
Line->Level += SkipIndent ? 0 : 1;
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(/*HasOpeningBrace=*/true, CanContainBracedList,
+ /*IfKind=*/nullptr, NextLBracesType);
flushComments(isOnNewLine(*FormatTok));
Line->Level -= SkipIndent ? 0 : 1;
}
@@ -1231,7 +1286,8 @@ void UnwrappedLineParser::readTokenWithJavaScriptASI() {
}
void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
- bool IsTopLevel) {
+ bool IsTopLevel,
+ TokenType NextLBracesType) {
if (Style.Language == FormatStyle::LK_TableGen &&
FormatTok->is(tok::pp_include)) {
nextToken();
@@ -1482,7 +1538,7 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
parseConcept();
return;
case tok::kw_requires:
- parseRequires();
+ parseRequiresClause();
return;
case tok::kw_enum:
// Ignore if this is part of "template <enum ...".
@@ -1562,6 +1618,8 @@ void UnwrappedLineParser::parseStructuralElement(IfStmtKind *IfKind,
parseChildBlock();
break;
case tok::l_brace:
+ if (NextLBracesType != TT_Unknown)
+ FormatTok->setType(NextLBracesType);
if (!tryToParsePropertyAccessor() && !tryToParseBracedList()) {
// A block outside of parentheses must be the last part of a
// structural element.
@@ -2095,7 +2153,10 @@ bool UnwrappedLineParser::parseBracedList(bool ContinueOnSemicolons,
return false;
}
-void UnwrappedLineParser::parseParens() {
+/// \brief Parses a pair of parentheses (and everything between them).
+/// \param AmpAmpTokenType If different from TT_Unknown, this type is set for
+/// all double ampersands. It only applies to the current parentheses scope.
+void UnwrappedLineParser::parseParens(TokenType AmpAmpTokenType) {
assert(FormatTok->Tok.is(tok::l_paren) && "'(' expected.");
nextToken();
do {
@@ -2145,6 +2206,13 @@ void UnwrappedLineParser::parseParens() {
else
nextToken();
break;
+ case tok::kw_requires:
+ parseRequiresExpression();
+ break;
+ case tok::ampamp:
+ if (AmpAmpTokenType != TT_Unknown)
+ FormatTok->setType(AmpAmpTokenType);
+ LLVM_FALLTHROUGH;
default:
nextToken();
break;
@@ -2695,6 +2763,11 @@ void UnwrappedLineParser::parseAccessSpecifier() {
}
}
+/// \brief Parses a concept definition.
+/// \pre The current token has to be the concept keyword.
+///
+/// Returns when the concept has been completely parsed, or when it detects
+/// that the concept definition is incorrect.
void UnwrappedLineParser::parseConcept() {
assert(FormatTok->Tok.is(tok::kw_concept) && "'concept' expected");
nextToken();
@@ -2704,100 +2777,179 @@ void UnwrappedLineParser::parseConcept() {
if (!FormatTok->Tok.is(tok::equal))
return;
nextToken();
- if (FormatTok->Tok.is(tok::kw_requires)) {
+ parseConstraintExpression();
+ if (FormatTok->Tok.is(tok::semi))
nextToken();
- parseRequiresExpression(Line->Level);
- } else {
- parseConstraintExpression(Line->Level);
- }
+ addUnwrappedLine();
}
-void UnwrappedLineParser::parseRequiresExpression(unsigned int OriginalLevel) {
- // requires (R range)
- if (FormatTok->Tok.is(tok::l_paren)) {
+/// \brief Parses a requires clause.
+/// \pre The current token needs to be the requires keyword.
+/// \sa parseRequiresExpression
+///
+/// Returns when it has finished parsing the clause, or when it detects that
+/// the clause is incorrect.
+void UnwrappedLineParser::parseRequiresClause() {
+ assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+ assert(FormatTok->getType() == TT_Unknown);
+
+ // If there is no previous token, we are within a requires expression;
+ // otherwise we will always have the template or function declaration in front
+ // of it.
+ bool InRequiresExpression =
+ !FormatTok->Previous ||
+ FormatTok->Previous->is(TT_RequiresExpressionLBrace);
+
+ FormatTok->setType(InRequiresExpression
+ ? TT_RequiresClauseInARequiresExpression
+ : TT_RequiresClause);
+
+ nextToken();
+ parseConstraintExpression();
+
+ if (!InRequiresExpression)
+ FormatTok->Previous->ClosesRequiresClause = true;
+}
+
+/// \brief Parses a requires expression.
+/// \pre The current token needs to be the requires keyword.
+/// \sa parseRequiresClause
+///
+/// Returns when it has finished parsing the expression, or when it detects
+/// that the expression is incorrect.
+void UnwrappedLineParser::parseRequiresExpression() {
+ assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+ assert(FormatTok->getType() == TT_Unknown);
+
+ FormatTok->setType(TT_RequiresExpression);
+ nextToken();
+
+ if (FormatTok->is(tok::l_paren)) {
+ FormatTok->setType(TT_RequiresExpressionLParen);
parseParens();
- if (Style.IndentRequires && OriginalLevel != Line->Level) {
- addUnwrappedLine();
- --Line->Level;
- }
}
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock();
- addUnwrappedLine();
- } else {
- parseConstraintExpression(OriginalLevel);
+ if (FormatTok->is(tok::l_brace)) {
+ FormatTok->setType(TT_RequiresExpressionLBrace);
+ parseChildBlock(/*CanContainBracedList=*/false,
+ /*NextLBracesType=*/TT_CompoundRequirementLBrace);
}
}
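An illustrative input of my own showing where the token roles set above land; the type names are the ones this patch introduces:

    #include <concepts>
    #include <ostream>

    template <typename T>
    concept Streamable =
        // The '(' after 'requires' is marked TT_RequiresExpressionLParen and
        // the '{' opening the body is marked TT_RequiresExpressionLBrace; the
        // '{' of the compound requirement inside becomes
        // TT_CompoundRequirementLBrace.
        requires(std::ostream &os, T v) {
          { os << v } -> std::same_as<std::ostream &>;
        };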
-void UnwrappedLineParser::parseConstraintExpression(
- unsigned int OriginalLevel) {
- // requires Id<T> && Id<T> || Id<T>
- while (
- FormatTok->isOneOf(tok::identifier, tok::kw_requires, tok::coloncolon)) {
- nextToken();
- while (FormatTok->isOneOf(tok::identifier, tok::coloncolon, tok::less,
- tok::greater, tok::comma, tok::ellipsis)) {
- if (FormatTok->Tok.is(tok::less)) {
+/// \brief Parses a constraint expression.
+///
+/// This is either the definition of a concept, or the body of a requires
+/// clause. It returns when parsing is complete or when the expression is
+/// incorrect.
+void UnwrappedLineParser::parseConstraintExpression() {
+ do {
+ switch (FormatTok->Tok.getKind()) {
+ case tok::kw_requires:
+ parseRequiresExpression();
+ break;
+
+ case tok::l_paren:
+ parseParens(/*AmpAmpTokenType=*/TT_BinaryOperator);
+ break;
+
+ case tok::l_square:
+ if (!tryToParseLambda())
+ return;
+ break;
+
+ case tok::identifier:
+ // We need to tell identifiers that start a template deduction guide, a
+ // variable, or a function return type (the constraint expression has
+ // already ended before those) apart from all other identifiers. It is
+ // easier to check this the other way around, via the previous token.
+ assert(FormatTok->Previous);
+ switch (FormatTok->Previous->Tok.getKind()) {
+ case tok::coloncolon: // Nested identifier.
+ case tok::ampamp: // Start of a function or variable for the
+ case tok::pipepipe: // constraint expression.
+ case tok::kw_requires: // Initial identifier of a requires clause.
+ case tok::equal: // Initial identifier of a concept declaration.
+ break;
+ default:
+ return;
+ }
+
+ // Read identifier with optional template declaration.
+ nextToken();
+ if (FormatTok->Tok.is(tok::less))
parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
/*ClosingBraceKind=*/tok::greater);
- continue;
- }
+ break;
+
+ case tok::kw_const:
+ case tok::semi:
+ case tok::kw_class:
+ case tok::kw_struct:
+ case tok::kw_union:
+ return;
+
+ case tok::l_brace:
+ // Potential function body.
+ return;
+
+ case tok::ampamp:
+ case tok::pipepipe:
+ FormatTok->setType(TT_BinaryOperator);
nextToken();
- }
- if (FormatTok->Tok.is(tok::kw_requires))
- parseRequiresExpression(OriginalLevel);
- if (FormatTok->Tok.is(tok::less)) {
- parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
- /*ClosingBraceKind=*/tok::greater);
- }
+ break;
- if (FormatTok->Tok.is(tok::l_paren))
- parseParens();
- if (FormatTok->Tok.is(tok::l_brace)) {
- if (Style.BraceWrapping.AfterFunction)
- addUnwrappedLine();
- FormatTok->setType(TT_FunctionLBrace);
- parseBlock();
- }
- if (FormatTok->Tok.is(tok::semi)) {
- // Eat any trailing semi.
+ case tok::kw_true:
+ case tok::kw_false:
+ case tok::kw_sizeof:
+ case tok::greater:
+ case tok::greaterequal:
+ case tok::greatergreater:
+ case tok::less:
+ case tok::lessequal:
+ case tok::lessless:
+ case tok::equalequal:
+ case tok::exclaim:
+ case tok::exclaimequal:
+ case tok::plus:
+ case tok::minus:
+ case tok::star:
+ case tok::slash:
+ case tok::numeric_constant:
+ case tok::kw_decltype:
+ case tok::comment:
+ case tok::comma:
+ case tok::coloncolon:
+ // Just eat them.
nextToken();
- addUnwrappedLine();
- }
- if (FormatTok->Tok.is(tok::colon))
- return;
- if (!FormatTok->Tok.isOneOf(tok::ampamp, tok::pipepipe)) {
- if (FormatTok->Previous &&
- !FormatTok->Previous->isOneOf(tok::identifier, tok::kw_requires,
- tok::coloncolon))
- addUnwrappedLine();
- if (Style.IndentRequires && OriginalLevel != Line->Level)
- --Line->Level;
break;
- } else {
- FormatTok->setType(TT_ConstraintJunctions);
- }
- nextToken();
- }
-}
+ case tok::kw_static_cast:
+ case tok::kw_const_cast:
+ case tok::kw_reinterpret_cast:
+ case tok::kw_dynamic_cast:
+ nextToken();
+ if (!FormatTok->is(tok::less))
+ return;
-void UnwrappedLineParser::parseRequires() {
- assert(FormatTok->Tok.is(tok::kw_requires) && "'requires' expected");
+ parseBracedList(/*ContinueOnSemicolons=*/false, /*IsEnum=*/false,
+ /*ClosingBraceKind=*/tok::greater);
+ break;
- unsigned OriginalLevel = Line->Level;
- if (FormatTok->Previous && FormatTok->Previous->is(tok::greater)) {
- addUnwrappedLine();
- if (Style.IndentRequires)
- ++Line->Level;
- }
- nextToken();
+ case tok::kw_bool:
+ // bool is only allowed if it is directly followed by a paren for a cast:
+ // concept C = bool(...);
+ // bool is the only type allowed in this position; casts to any other type
+ // must themselves be wrapped in a cast to bool and are thus handled by the
+ // other cases.
+ nextToken();
+ if (FormatTok->isNot(tok::l_paren))
+ return;
+ parseParens();
+ break;
- parseRequiresExpression(OriginalLevel);
+ default:
+ return;
+ }
+ } while (!eof());
}
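An example constraint of my own that exercises several of the cases above: sizeof, comparison and logical operators (the latter now marked TT_BinaryOperator), a nested name, and the bool(...) cast form accepted by the final case:

    #include <type_traits>

    template <typename T>
    concept SmallAndTrivial =
        sizeof(T) <= 16 && std::is_trivially_copyable_v<T> && bool(T::enabled);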
bool UnwrappedLineParser::parseEnum() {
@@ -2993,7 +3145,7 @@ void UnwrappedLineParser::parseJavaEnumBody() {
}
// Parse the class body after the enum's ";" if any.
- parseLevel(/*HasOpeningBrace=*/true);
+ parseLevel(/*HasOpeningBrace=*/true, /*CanContainBracedList=*/true);
nextToken();
--Line->Level;
addUnwrappedLine();
diff --git a/clang/lib/Format/UnwrappedLineParser.h b/clang/lib/Format/UnwrappedLineParser.h
index f39d76187f44..d49bbaefd146 100644
--- a/clang/lib/Format/UnwrappedLineParser.h
+++ b/clang/lib/Format/UnwrappedLineParser.h
@@ -92,11 +92,16 @@ private:
void reset();
void parseFile();
bool precededByCommentOrPPDirective() const;
- bool parseLevel(bool HasOpeningBrace, IfStmtKind *IfKind = nullptr);
+ bool parseLevel(bool HasOpeningBrace, bool CanContainBracedList,
+ IfStmtKind *IfKind = nullptr,
+ TokenType NextLBracesType = TT_Unknown);
IfStmtKind parseBlock(bool MustBeDeclaration = false, unsigned AddLevels = 1u,
bool MunchSemi = true,
- bool UnindentWhitesmithsBraces = false);
- void parseChildBlock();
+ bool UnindentWhitesmithsBraces = false,
+ bool CanContainBracedList = true,
+ TokenType NextLBracesType = TT_Unknown);
+ void parseChildBlock(bool CanContainBracedList = true,
+ TokenType NextLBracesType = TT_Unknown);
void parsePPDirective();
void parsePPDefine();
void parsePPIf(bool IfDef);
@@ -106,11 +111,12 @@ private:
void parsePPUnknown();
void readTokenWithJavaScriptASI();
void parseStructuralElement(IfStmtKind *IfKind = nullptr,
- bool IsTopLevel = false);
+ bool IsTopLevel = false,
+ TokenType NextLBracesType = TT_Unknown);
bool tryToParseBracedList();
bool parseBracedList(bool ContinueOnSemicolons = false, bool IsEnum = false,
tok::TokenKind ClosingBraceKind = tok::r_brace);
- void parseParens();
+ void parseParens(TokenType AmpAmpTokenType = TT_Unknown);
void parseSquare(bool LambdaIntroducer = false);
void keepAncestorBraces();
FormatToken *parseIfThenElse(IfStmtKind *IfKind, bool KeepBraces = false);
@@ -127,9 +133,9 @@ private:
bool parseEnum();
bool parseStructLike();
void parseConcept();
- void parseRequires();
- void parseRequiresExpression(unsigned int OriginalLevel);
- void parseConstraintExpression(unsigned int OriginalLevel);
+ void parseRequiresClause();
+ void parseRequiresExpression();
+ void parseConstraintExpression();
void parseJavaEnumBody();
// Parses a record (aka class) as a top level element. If ParseAsExpr is true,
// parses the record as a child block, i.e. if the class declaration is an
diff --git a/clang/lib/Sema/SemaCast.cpp b/clang/lib/Sema/SemaCast.cpp
index e9c1116257d6..7d580dc5b6b3 100644
--- a/clang/lib/Sema/SemaCast.cpp
+++ b/clang/lib/Sema/SemaCast.cpp
@@ -1356,7 +1356,7 @@ static TryCastResult TryStaticCast(Sema &Self, ExprResult &SrcExpr,
if (SrcType->isIntegralOrEnumerationType()) {
// [expr.static.cast]p10 If the enumeration type has a fixed underlying
// type, the value is first converted to that type by integral conversion
- const EnumType *Enum = DestType->getAs<EnumType>();
+ const EnumType *Enum = DestType->castAs<EnumType>();
Kind = Enum->getDecl()->isFixed() &&
Enum->getDecl()->getIntegerType()->isBooleanType()
? CK_IntegralToBoolean
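Background on this and the two Sema changes below, which is general LLVM convention rather than anything specific to this patch: getAs/dyn_cast return null when the operand is not of the requested kind, whereas castAs/cast assert that it is. Since each result here is dereferenced unconditionally, the asserting form documents the invariant and removes the null-dereference path. A minimal sketch, with hypothetical helper names:

    #include <string>
    #include "clang/AST/Decl.h"
    #include "llvm/Support/Casting.h"

    // When D might not be a FunctionDecl, dyn_cast plus a null check is required.
    std::string nameIfFunction(const clang::Decl *D) {
      if (const auto *FD = llvm::dyn_cast<clang::FunctionDecl>(D))
        return FD->getQualifiedNameAsString();
      return {};
    }

    // When the caller already guarantees the kind, cast<> asserts the invariant
    // instead of silently yielding a null pointer that is then dereferenced.
    std::string nameOfFunction(const clang::Decl *D) {
      return llvm::cast<clang::FunctionDecl>(D)->getQualifiedNameAsString();
    }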
diff --git a/clang/lib/Sema/SemaChecking.cpp b/clang/lib/Sema/SemaChecking.cpp
index 575181a6b61c..c422981a1a2e 100644
--- a/clang/lib/Sema/SemaChecking.cpp
+++ b/clang/lib/Sema/SemaChecking.cpp
@@ -11310,7 +11310,7 @@ void CheckFreeArgumentsCast(Sema &S, const std::string &CalleeName,
/// Alerts the user that they are attempting to free a non-malloc'd object.
void Sema::CheckFreeArguments(const CallExpr *E) {
const std::string CalleeName =
- dyn_cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
+ cast<FunctionDecl>(E->getCalleeDecl())->getQualifiedNameAsString();
{ // Prefer something that doesn't involve a cast to make things simpler.
const Expr *Arg = E->getArg(0)->IgnoreParenCasts();
diff --git a/clang/lib/Sema/SemaCoroutine.cpp b/clang/lib/Sema/SemaCoroutine.cpp
index aae90c403f0f..71fcf4aebda8 100644
--- a/clang/lib/Sema/SemaCoroutine.cpp
+++ b/clang/lib/Sema/SemaCoroutine.cpp
@@ -680,7 +680,7 @@ static void checkNoThrow(Sema &S, const Stmt *E,
QualType::DestructionKind::DK_cxx_destructor) {
const auto *T =
cast<RecordType>(ReturnType.getCanonicalType().getTypePtr());
- checkDeclNoexcept(dyn_cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
+ checkDeclNoexcept(cast<CXXRecordDecl>(T->getDecl())->getDestructor(),
/*IsDtor=*/true);
}
} else
diff --git a/clang/test/Analysis/CFContainers-invalid.c b/clang/test/Analysis/CFContainers-invalid.c
index ce1284f75da7..a86c3d807edc 100644
--- a/clang/test/Analysis/CFContainers-invalid.c
+++ b/clang/test/Analysis/CFContainers-invalid.c
@@ -13,7 +13,7 @@ CFArrayRef CFArrayCreate(CFAllocatorRef);
CFDictionaryRef CFDictionaryCreate(CFAllocatorRef);
CFSetRef CFSetCreate(CFAllocatorRef);
-void testNoCrash() {
+void testNoCrash(void) {
(void)CFArrayCreate(kCFAllocatorDefault);
(void)CFDictionaryCreate(kCFAllocatorDefault);
(void)CFSetCreate(kCFAllocatorDefault);
diff --git a/clang/test/Analysis/CGColorSpace.c b/clang/test/Analysis/CGColorSpace.c
index 38f0512b0063..3faf21ae120d 100644
--- a/clang/test/Analysis/CGColorSpace.c
+++ b/clang/test/Analysis/CGColorSpace.c
@@ -5,12 +5,12 @@ extern CGColorSpaceRef CGColorSpaceCreateDeviceRGB(void);
extern CGColorSpaceRef CGColorSpaceRetain(CGColorSpaceRef space);
extern void CGColorSpaceRelease(CGColorSpaceRef space);
-void f() {
+void f(void) {
CGColorSpaceRef X = CGColorSpaceCreateDeviceRGB(); // expected-warning{{leak}}
CGColorSpaceRetain(X);
}
-void fb() {
+void fb(void) {
CGColorSpaceRef X = CGColorSpaceCreateDeviceRGB();
CGColorSpaceRetain(X);
CGColorSpaceRelease(X);
diff --git a/clang/test/Analysis/Checkers/RunLoopAutoreleaseLeakChecker.m b/clang/test/Analysis/Checkers/RunLoopAutoreleaseLeakChecker.m
index 2bf86410f3ff..d03b761ba0d7 100644
--- a/clang/test/Analysis/Checkers/RunLoopAutoreleaseLeakChecker.m
+++ b/clang/test/Analysis/Checkers/RunLoopAutoreleaseLeakChecker.m
@@ -15,19 +15,19 @@
#ifndef EXTRA
-void just_runloop() { // No warning: no statements in between
+void just_runloop(void) { // No warning: no statements in between
@autoreleasepool {
[[NSRunLoop mainRunLoop] run]; // no-warning
}
}
-void just_xpcmain() { // No warning: no statements in between
+void just_xpcmain(void) { // No warning: no statements in between
@autoreleasepool {
xpc_main(); // no-warning
}
}
-void runloop_init_before() { // Warning: object created before the loop.
+void runloop_init_before(void) { // Warning: object created before the loop.
@autoreleasepool {
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool followed by the launch of main run loop may never get released; consider moving them to a separate autorelease pool}}
(void) object;
@@ -35,7 +35,7 @@ void runloop_init_before() { // Warning: object created before the loop.
}
}
-void runloop_init_before_separate_pool() { // No warning: separate autorelease pool.
+void runloop_init_before_separate_pool(void) { // No warning: separate autorelease pool.
@autoreleasepool {
NSObject *object;
@autoreleasepool {
@@ -46,7 +46,7 @@ void runloop_init_before_separate_pool() { // No warning: separate autorelease p
}
}
-void xpcmain_init_before() { // Warning: object created before the loop.
+void xpcmain_init_before(void) { // Warning: object created before the loop.
@autoreleasepool {
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool followed by the launch of xpc_main may never get released; consider moving them to a separate autorelease pool}}
(void) object;
@@ -54,7 +54,7 @@ void xpcmain_init_before() { // Warning: object created before the loop.
}
}
-void runloop_init_before_two_objects() { // Warning: object created before the loop.
+void runloop_init_before_two_objects(void) { // Warning: object created before the loop.
@autoreleasepool {
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool followed by the launch of main run loop may never get released; consider moving them to a separate autorelease pool}}
NSObject *object2 = [[NSObject alloc] init]; // no-warning, warning on the first one is enough.
@@ -64,13 +64,13 @@ void runloop_init_before_two_objects() { // Warning: object created before the l
}
}
-void runloop_no_autoreleasepool() {
+void runloop_no_autoreleasepool(void) {
NSObject *object = [[NSObject alloc] init]; // no-warning
(void)object;
[[NSRunLoop mainRunLoop] run];
}
-void runloop_init_after() { // No warning: objects created after the loop
+void runloop_init_after(void) { // No warning: objects created after the loop
@autoreleasepool {
[[NSRunLoop mainRunLoop] run];
NSObject *object = [[NSObject alloc] init]; // no-warning
@@ -78,7 +78,7 @@ void runloop_init_after() { // No warning: objects created after the loop
}
}
-void no_crash_on_empty_children() {
+void no_crash_on_empty_children(void) {
@autoreleasepool {
for (;;) {}
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool followed by the launch of main run loop may never get released; consider moving them to a separate autorelease pool}}
@@ -90,7 +90,7 @@ void no_crash_on_empty_children() {
#endif
#ifdef AP1
-int main() {
+int main(void) {
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool of last resort followed by the launch of main run loop may never get released; consider moving them to a separate autorelease pool}}
(void) object;
[[NSRunLoop mainRunLoop] run];
@@ -100,7 +100,7 @@ int main() {
#ifdef AP2
// expected-no-diagnostics
-int main() {
+int main(void) {
NSObject *object = [[NSObject alloc] init]; // no-warning
(void) object;
@autoreleasepool {
@@ -112,7 +112,7 @@ int main() {
#ifdef AP3
// expected-no-diagnostics
-int main() {
+int main(void) {
[[NSRunLoop mainRunLoop] run];
NSObject *object = [[NSObject alloc] init]; // no-warning
(void) object;
@@ -121,7 +121,7 @@ int main() {
#endif
#ifdef AP4
-int main() {
+int main(void) {
NSObject *object = [[NSObject alloc] init]; // expected-warning{{Temporary objects allocated in the autorelease pool of last resort followed by the launch of xpc_main may never get released; consider moving them to a separate autorelease pool}}
(void) object;
xpc_main();
@@ -148,7 +148,7 @@ CFStringRef processString(const __NSConstantString *, void *);
#define CFSTR __builtin___CFStringMakeConstantString
-int main() {
+int main(void) {
I *i;
@autoreleasepool {
NSString *s1 = (__bridge_transfer NSString *)processString(0, 0);
diff --git a/clang/test/Analysis/DeallocUseAfterFreeErrors.m b/clang/test/Analysis/DeallocUseAfterFreeErrors.m
index 2e1bdc41bb6e..c20aebc89c5c 100644
--- a/clang/test/Analysis/DeallocUseAfterFreeErrors.m
+++ b/clang/test/Analysis/DeallocUseAfterFreeErrors.m
@@ -1,6 +1,6 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core,osx.cocoa.SuperDealloc,debug.ExprInspection -analyzer-output=text -verify %s
-void clang_analyzer_warnIfReached();
+void clang_analyzer_warnIfReached(void);
#define nil ((id)0)
diff --git a/clang/test/Analysis/Inputs/ctu-other.c b/clang/test/Analysis/Inputs/ctu-other.c
index 48a3f322cbd5..2bb5a04d6c22 100644
--- a/clang/test/Analysis/Inputs/ctu-other.c
+++ b/clang/test/Analysis/Inputs/ctu-other.c
@@ -33,7 +33,7 @@ int g(struct S *ctx) {
// Test that asm import does not fail.
// TODO: Support the GNU extension asm keyword as well.
// Example using the GNU extension: asm("mov $42, %0" : "=r"(res));
-int inlineAsm() {
+int inlineAsm(void) {
int res;
__asm__("mov $42, %0"
: "=r"(res));
diff --git a/clang/test/Analysis/NSContainers.m b/clang/test/Analysis/NSContainers.m
index 74db771a52d0..f41189a5e1dc 100644
--- a/clang/test/Analysis/NSContainers.m
+++ b/clang/test/Analysis/NSContainers.m
@@ -112,33 +112,33 @@ typedef struct {
@end
// NSMutableArray API
-void testNilArgNSMutableArray1() {
+void testNilArgNSMutableArray1(void) {
NSMutableArray *marray = [[NSMutableArray alloc] init];
[marray addObject:0]; // expected-warning {{Argument to 'NSMutableArray' method 'addObject:' cannot be nil}}
}
-void testNilArgNSMutableArray2() {
+void testNilArgNSMutableArray2(void) {
NSMutableArray *marray = [[NSMutableArray alloc] init];
[marray insertObject:0 atIndex:1]; // expected-warning {{Argument to 'NSMutableArray' method 'insertObject:atIndex:' cannot be nil}}
}
-void testNilArgNSMutableArray3() {
+void testNilArgNSMutableArray3(void) {
NSMutableArray *marray = [[NSMutableArray alloc] init];
[marray replaceObjectAtIndex:1 withObject:0]; // expected-warning {{Argument to 'NSMutableArray' method 'replaceObjectAtIndex:withObject:' cannot be nil}}
}
-void testNilArgNSMutableArray4() {
+void testNilArgNSMutableArray4(void) {
NSMutableArray *marray = [[NSMutableArray alloc] init];
[marray setObject:0 atIndexedSubscript:1]; // expected-warning {{Argument to 'NSMutableArray' method 'setObject:atIndexedSubscript:' cannot be nil}}
}
-void testNilArgNSMutableArray5() {
+void testNilArgNSMutableArray5(void) {
NSMutableArray *marray = [[NSMutableArray alloc] init];
marray[1] = 0; // expected-warning {{Array element cannot be nil}}
}
// NSArray API
-void testNilArgNSArray1() {
+void testNilArgNSArray1(void) {
NSArray *array = [[NSArray alloc] init];
NSArray *copyArray = [array arrayByAddingObject:0]; // expected-warning {{Argument to 'NSArray' method 'arrayByAddingObject:' cannot be nil}}
}
@@ -224,7 +224,7 @@ void idc2(id x) {
if (!x)
return;
}
-Foo *retNil() {
+Foo *retNil(void) {
return 0;
}
@@ -282,7 +282,7 @@ void testCountAwareNSOrderedSet(NSOrderedSet *containers, int *validptr) {
}
}
-void testLiteralsNonNil() {
+void testLiteralsNonNil(void) {
clang_analyzer_eval(!!@[]); // expected-warning{{TRUE}}
clang_analyzer_eval(!!@{}); // expected-warning{{TRUE}}
}
diff --git a/clang/test/Analysis/NSString.m b/clang/test/Analysis/NSString.m
index a53fc1e56624..59e333aea32d 100644
--- a/clang/test/Analysis/NSString.m
+++ b/clang/test/Analysis/NSString.m
@@ -134,7 +134,7 @@ NSString* f7(NSString* s1, NSString* s2, NSString* s3) {
return s4;
}
-NSMutableArray* f8() {
+NSMutableArray* f8(void) {
NSString* s = [[NSString alloc] init];
NSMutableArray* a = [[NSMutableArray alloc] initWithCapacity:2];
@@ -143,7 +143,7 @@ NSMutableArray* f8() {
return a;
}
-void f9() {
+void f9(void) {
NSString* s = [[NSString alloc] init];
NSString* q = s;
@@ -151,7 +151,7 @@ void f9() {
[q release]; // expected-warning {{used after it is released}}
}
-NSString* f10() {
+NSString* f10(void) {
static NSString* s = 0;
if (!s) s = [[NSString alloc] init];
return s; // no-warning
@@ -172,7 +172,7 @@ NSString* f11(CFDictionaryRef dict, const char* key) {
// Test case for passing a tracked object by-reference to a function we
// don't understand.
void unknown_function_f12(NSString** s);
-void f12() {
+void f12(void) {
NSString *string = [[NSString alloc] init];
unknown_function_f12(&string); // no-warning
}
@@ -275,7 +275,7 @@ void f14(MyString *s) {
}
@end
-id testSharedClassFromFunction() {
+id testSharedClassFromFunction(void) {
return [[SharedClass alloc] _init]; // no-warning
}
@@ -300,7 +300,7 @@ extern BOOL objc_atomicCompareAndSwapPtr(id predicate, id replacement, volatile
}
#endif
-void testOSCompareAndSwap() {
+void testOSCompareAndSwap(void) {
NSString *old = 0;
NSString *s = [[NSString alloc] init]; // no-warning
if (!OSAtomicCompareAndSwapPtr(0, s, (void**) &old))
@@ -309,7 +309,7 @@ void testOSCompareAndSwap() {
[old release];
}
-void testOSCompareAndSwapXXBarrier_local() {
+void testOSCompareAndSwapXXBarrier_local(void) {
NSString *old = 0;
NSString *s = [[NSString alloc] init]; // no-warning
if (!COMPARE_SWAP_BARRIER((intptr_t) 0, (intptr_t) s, (intptr_t*) &old))
@@ -318,7 +318,7 @@ void testOSCompareAndSwapXXBarrier_local() {
[old release];
}
-void testOSCompareAndSwapXXBarrier_local_no_direct_release() {
+void testOSCompareAndSwapXXBarrier_local_no_direct_release(void) {
NSString *old = 0;
NSString *s = [[NSString alloc] init]; // no-warning
if (!COMPARE_SWAP_BARRIER((intptr_t) 0, (intptr_t) s, (intptr_t*) &old))
@@ -333,7 +333,7 @@ int testOSCompareAndSwapXXBarrier_id(Class myclass, id xclass) {
return 0;
}
-void test_objc_atomicCompareAndSwap_local() {
+void test_objc_atomicCompareAndSwap_local(void) {
NSString *old = 0;
NSString *s = [[NSString alloc] init]; // no-warning
if (!objc_atomicCompareAndSwapPtr(0, s, &old))
@@ -342,7 +342,7 @@ void test_objc_atomicCompareAndSwap_local() {
[old release];
}
-void test_objc_atomicCompareAndSwap_local_no_direct_release() {
+void test_objc_atomicCompareAndSwap_local_no_direct_release(void) {
NSString *old = 0;
NSString *s = [[NSString alloc] init]; // no-warning
if (!objc_atomicCompareAndSwapPtr(0, s, &old))
@@ -369,13 +369,13 @@ void test_objc_atomicCompareAndSwap_parameter_no_direct_release(NSString **old)
// Test stringWithFormat (<rdar://problem/6815234>)
-void test_stringWithFormat() {
+void test_stringWithFormat(void) {
NSString *string = [[NSString stringWithFormat:@"%ld", (long) 100] retain];
[string release];
[string release]; // expected-warning{{Incorrect decrement of the reference count}}
}
// Test isTrackedObjectType().
typedef NSString* WonkyTypedef;
@interface TestIsTracked
+ (WonkyTypedef)newString;
diff --git a/clang/test/Analysis/NSWindow.m b/clang/test/Analysis/NSWindow.m
index e247ff18ceb0..aa36227f94d2 100644
--- a/clang/test/Analysis/NSWindow.m
+++ b/clang/test/Analysis/NSWindow.m
@@ -44,7 +44,7 @@ extern NSString *NSWindowDidBecomeKeyNotification;
// Test cases.
-void f1() {
+void f1(void) {
NSWindow *window = [[NSWindow alloc]
initWithContentRect:NSMakeRect(0,0,100,100)
styleMask:NSTitledWindowMask|NSClosableWindowMask
@@ -54,7 +54,7 @@ void f1() {
[window orderFrontRegardless]; // no-warning
}
-void f2() {
+void f2(void) {
NSWindow *window = [[NSWindow alloc]
initWithContentRect:NSMakeRect(0,0,100,100)
styleMask:NSTitledWindowMask|NSClosableWindowMask
@@ -65,7 +65,7 @@ void f2() {
[window orderFrontRegardless]; // no-warning
}
-void f2b() {
+void f2b(void) {
// FIXME: NSWindow doesn't own itself until it is displayed.
NSWindow *window = [[NSWindow alloc] // no-warning
initWithContentRect:NSMakeRect(0,0,100,100)
@@ -80,7 +80,7 @@ void f2b() {
}
-void f3() {
+void f3(void) {
// FIXME: For now we don't track NSWindow.
NSWindow *window = [NSWindow alloc]; // expected-warning{{never read}}
}
diff --git a/clang/test/Analysis/NoReturn.m b/clang/test/Analysis/NoReturn.m
index c08fd0dc5156..f7d5cbf3a33e 100644
--- a/clang/test/Analysis/NoReturn.m
+++ b/clang/test/Analysis/NoReturn.m
@@ -95,7 +95,7 @@ int testCustomException(int *x) {
- (void) alsoDoesNotReturn __attribute__((analyzer_noreturn));
@end
-void test_rdar11634353() {
+void test_rdar11634353(void) {
[Radar11634353 doesNotReturn];
int *p = 0;
*p = 0xDEADBEEF; // no-warning
@@ -107,7 +107,7 @@ void test_rdar11634352_instance(Radar11634353 *o) {
*p = 0xDEADBEEF; // no-warning
}
-void test_rdar11634353_positive() {
+void test_rdar11634353_positive(void) {
int *p = 0;
*p = 0xDEADBEEF; // expected-warning {{null pointer}}
}
@@ -126,7 +126,7 @@ void PR11959(int *p) {
// Test that hard-coded Microsoft _wassert name is recognized as a noreturn
#define assert(_Expression) (void)( (!!(_Expression)) || (_wassert(#_Expression, __FILE__, __LINE__), 0) )
extern void _wassert(const char * _Message, const char *_File, unsigned _Line);
-void test_wassert() {
+void test_wassert(void) {
assert(0);
int *p = 0;
*p = 0xDEADBEEF; // no-warning
@@ -137,7 +137,7 @@ void test_wassert() {
#define assert(_Expression) ((_Expression) ? (void)0 : __assert2(0, 0, 0, 0));
extern void __assert2(const char *, int, const char *, const char *);
extern void _wassert(const char * _Message, const char *_File, unsigned _Line);
-void test___assert2() {
+void test___assert2(void) {
assert(0);
int *p = 0;
*p = 0xDEADBEEF; // no-warning
diff --git a/clang/test/Analysis/OSAtomic_mac.c b/clang/test/Analysis/OSAtomic_mac.c
index b09c71f6c6e9..272aada712e3 100644
--- a/clang/test/Analysis/OSAtomic_mac.c
+++ b/clang/test/Analysis/OSAtomic_mac.c
@@ -7,7 +7,7 @@ int OSAtomicCompareAndSwapPtrBarrier() {
// but we should trust our BodyFarm instead.
}
-int *invalidSLocOnRedecl() {
+int *invalidSLocOnRedecl(void) {
// Was crashing when trying to throw a report about returning an uninitialized
// value to the caller. FIXME: We should probably still throw that report,
// something like "The "compare" part of CompareAndSwap depends on an
@@ -17,7 +17,7 @@ int *invalidSLocOnRedecl() {
return b;
}
-void testThatItActuallyWorks() {
+void testThatItActuallyWorks(void) {
void *x = 0;
int res = OSAtomicCompareAndSwapPtrBarrier(0, &x, &x);
clang_analyzer_eval(res); // expected-warning{{TRUE}}
diff --git a/clang/test/Analysis/UserNullabilityAnnotations.m b/clang/test/Analysis/UserNullabilityAnnotations.m
index 5e708c7aca58..cb6c288b6782 100644
--- a/clang/test/Analysis/UserNullabilityAnnotations.m
+++ b/clang/test/Analysis/UserNullabilityAnnotations.m
@@ -26,7 +26,7 @@ typedef struct NestedNonnullMember {
int *_Nonnull Value;
} NestedNonnullMember;
-NestedNonnullMember *foo();
+NestedNonnullMember *foo(void);
void f1(NestedNonnullMember *Root) {
NestedNonnullMember *Grandson = Root->Child->Child;
diff --git a/clang/test/Analysis/_Bool-increment-decrement.c b/clang/test/Analysis/_Bool-increment-decrement.c
index 477b6ed43830..6e55b0731a69 100644
--- a/clang/test/Analysis/_Bool-increment-decrement.c
+++ b/clang/test/Analysis/_Bool-increment-decrement.c
@@ -2,7 +2,7 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=debug.ExprInspection -verify -std=c11 -Dbool=_Bool -Dtrue=1 -Dfalse=0 %s
extern void clang_analyzer_eval(bool);
-void test__Bool_value() {
+void test__Bool_value(void) {
{
bool b = true;
clang_analyzer_eval(b == 1); // expected-warning{{TRUE}}
@@ -36,7 +36,7 @@ void test__Bool_value() {
}
}
-void test__Bool_increment() {
+void test__Bool_increment(void) {
{
bool b = true;
b++;
@@ -87,7 +87,7 @@ void test__Bool_increment() {
}
}
-void test__Bool_decrement() {
+void test__Bool_decrement(void) {
{
bool b = true;
b--;
diff --git a/clang/test/Analysis/analyzer-display-progress.m b/clang/test/Analysis/analyzer-display-progress.m
index 8d0b3d6d5679..24414f659c39 100644
--- a/clang/test/Analysis/analyzer-display-progress.m
+++ b/clang/test/Analysis/analyzer-display-progress.m
@@ -2,7 +2,7 @@
#include "Inputs/system-header-simulator-objc.h"
-static void f() {}
+static void f(void) {}
@interface I: NSObject
-(void)instanceMethod:(int)arg1 with:(int)arg2;
diff --git a/clang/test/Analysis/analyzer-stats.c b/clang/test/Analysis/analyzer-stats.c
index b58e862f6c65..69c61e17eb1f 100644
--- a/clang/test/Analysis/analyzer-stats.c
+++ b/clang/test/Analysis/analyzer-stats.c
@@ -1,8 +1,8 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core,deadcode.DeadStores,debug.Stats -verify -Wno-unreachable-code -analyzer-opt-analyze-nested-blocks -analyzer-max-loop 4 %s
-int foo();
+int foo(void);
-int test() { // expected-warning-re{{test -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 0 | Exhausted Block: no | Empty WorkList: yes}}
+int test(void) { // expected-warning-re{{test -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 0 | Exhausted Block: no | Empty WorkList: yes}}
int a = 1;
a = 34 / 12;
@@ -14,7 +14,7 @@ int test() { // expected-warning-re{{test -> Total CFGBlocks: {{[0-9]+}} | Unrea
}
-int sink() // expected-warning-re{{sink -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 1 | Exhausted Block: yes | Empty WorkList: yes}}
+int sink(void) // expected-warning-re{{sink -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 1 | Exhausted Block: yes | Empty WorkList: yes}}
{
for (int i = 0; i < 10; ++i) // expected-warning {{(sink): The analyzer generated a sink at this point}}
++i;
@@ -22,7 +22,7 @@ int sink() // expected-warning-re{{sink -> Total CFGBlocks: {{[0-9]+}} | Unreach
return 0;
}
-int emptyConditionLoop() // expected-warning-re{{emptyConditionLoop -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 0 | Exhausted Block: yes | Empty WorkList: yes}}
+int emptyConditionLoop(void) // expected-warning-re{{emptyConditionLoop -> Total CFGBlocks: {{[0-9]+}} | Unreachable CFGBlocks: 0 | Exhausted Block: yes | Empty WorkList: yes}}
{
int num = 1;
for (;;)
diff --git a/clang/test/Analysis/arc-zero-init.m b/clang/test/Analysis/arc-zero-init.m
index de1e978cd0d8..831d7003cae9 100644
--- a/clang/test/Analysis/arc-zero-init.m
+++ b/clang/test/Analysis/arc-zero-init.m
@@ -8,7 +8,7 @@
@interface SomeClass
@end
-void simpleStrongPointerValue() {
+void simpleStrongPointerValue(void) {
SomeClass *x;
if (x) {}
#if !__has_feature(objc_arc)
@@ -16,7 +16,7 @@ void simpleStrongPointerValue() {
#endif
}
-void simpleArray() {
+void simpleArray(void) {
SomeClass *vlaArray[5];
if (vlaArray[0]) {}
@@ -25,7 +25,7 @@ void simpleArray() {
#endif
}
-void variableLengthArray() {
+void variableLengthArray(void) {
int count = 1;
SomeClass * vlaArray[count];
@@ -35,7 +35,7 @@ void variableLengthArray() {
#endif
}
-void variableLengthArrayWithExplicitStrongAttribute() {
+void variableLengthArrayWithExplicitStrongAttribute(void) {
int count = 1;
__attribute__((objc_ownership(strong))) SomeClass * vlaArray[count];
diff --git a/clang/test/Analysis/array-struct-region.c b/clang/test/Analysis/array-struct-region.c
index c27abfb6ac98..657be62c94e6 100644
--- a/clang/test/Analysis/array-struct-region.c
+++ b/clang/test/Analysis/array-struct-region.c
@@ -2,7 +2,7 @@
void clang_analyzer_eval(int);
-int string_literal_init() {
+int string_literal_init(void) {
char a[] = "abc";
char b[2] = "abc"; // expected-warning{{too long}}
char c[5] = "abc";
@@ -42,7 +42,7 @@ void nested_compound_literals_float(float rad) {
}
-void struct_as_array() {
+void struct_as_array(void) {
struct simple { int x; int y; };
struct simple a;
struct simple *p = &a;
@@ -60,14 +60,14 @@ void struct_as_array() {
// PR13264 / <rdar://problem/11802440>
struct point { int x; int y; };
struct circle { struct point o; int r; };
-struct circle get_circle() {
+struct circle get_circle(void) {
struct circle result;
result.r = 5;
result.o = (struct point){0, 0};
return result;
}
-void struct_in_struct() {
+void struct_in_struct(void) {
struct circle c;
c = get_circle();
// This used to think c.r was undefined because c.o is a LazyCompoundVal.
@@ -77,14 +77,14 @@ void struct_in_struct() {
// We also test with floats because we don't model floats right now,
// and the original bug report used a float.
struct circle_f { struct point o; float r; };
-struct circle_f get_circle_f() {
+struct circle_f get_circle_f(void) {
struct circle_f result;
result.r = 5.0;
result.o = (struct point){0, 0};
return result;
}
-float struct_in_struct_f() {
+float struct_in_struct_f(void) {
struct circle_f c;
c = get_circle_f();
@@ -92,7 +92,7 @@ float struct_in_struct_f() {
}
-int randomInt();
+int randomInt(void);
int testSymbolicInvalidation(int index) {
int vals[10];
@@ -122,7 +122,7 @@ typedef struct {
int x, y, z;
} S;
-S makeS();
+S makeS(void);
int testSymbolicInvalidationStruct(int index) {
S vals[10];
@@ -183,7 +183,7 @@ int testConcreteInvalidationDoubleStruct(int index) {
}
-int testNonOverlappingStructFieldsSimple() {
+int testNonOverlappingStructFieldsSimple(void) {
S val;
val.x = 1;
@@ -277,7 +277,7 @@ typedef struct {
int length;
} ShortStringWrapper;
-void testArrayStructCopy() {
+void testArrayStructCopy(void) {
ShortString s = { "abc" };
ShortString s2 = s;
ShortString s3 = s2;
@@ -294,7 +294,7 @@ void testArrayStructCopy() {
clang_analyzer_eval(s4.data[2] == 'c'); // expected-warning{{TRUE}}
}
-void testArrayStructCopyNested() {
+void testArrayStructCopyNested(void) {
ShortString s = { "abc" };
ShortString s2 = s;
diff --git a/clang/test/Analysis/array-struct-region.cpp b/clang/test/Analysis/array-struct-region.cpp
index 1b9fa3e8db55..31cbb60ba991 100644
--- a/clang/test/Analysis/array-struct-region.cpp
+++ b/clang/test/Analysis/array-struct-region.cpp
@@ -46,16 +46,16 @@ bool operator ~(const struct S &s) { return (&s) != &s; }
#ifdef INLINE
-struct S getS() {
+struct S getS(void) {
struct S s = { 42 };
return s;
}
#else
-struct S getS();
+struct S getS(void);
#endif
-void testAssignment() {
+void testAssignment(void) {
struct S s = getS();
if (s.field != 42) return;
@@ -78,7 +78,7 @@ void testAssignment() {
}
-void testImmediateUse() {
+void testImmediateUse(void) {
int x = getS().field;
if (x != 42) return;
@@ -105,12 +105,12 @@ int getAssignedField(struct S s) {
return s.field;
}
-void testArgument() {
+void testArgument(void) {
clang_analyzer_eval(getConstrainedField(getS()) == 42); // expected-warning{{TRUE}}
clang_analyzer_eval(getAssignedField(getS()) == 42); // expected-warning{{TRUE}}
}
-void testImmediateUseParens() {
+void testImmediateUseParens(void) {
int x = ((getS())).field;
if (x != 42) return;
diff --git a/clang/test/Analysis/array-struct.c b/clang/test/Analysis/array-struct.c
index 45c4c9d4ad17..a609f9abfa3e 100644
--- a/clang/test/Analysis/array-struct.c
+++ b/clang/test/Analysis/array-struct.c
@@ -28,19 +28,19 @@ void f(void) {
// StringLiteral in lvalue context and pointer to array type.
// p: ElementRegion, q: StringRegion
-void f2() {
+void f2(void) {
char *p = "/usr/local";
char (*q)[4];
q = &"abc";
}
// Typedef'ed struct definition.
-void f3() {
+void f3(void) {
STYPE s;
}
// Initialize array with InitExprList.
-void f4() {
+void f4(void) {
int a[] = { 1, 2, 3};
int b[3] = { 1, 2 };
struct s c[] = {{1,{1}}};
@@ -48,13 +48,13 @@ void f4() {
// Struct variable in lvalue context.
// Assign UnknownVal to the whole struct.
-void f5() {
+void f5(void) {
struct s data;
g1(&data);
}
// AllocaRegion test.
-void f6() {
+void f6(void) {
char *p;
p = __builtin_alloca(10);
g(p);
@@ -70,30 +70,30 @@ struct s2;
void g2(struct s2 *p);
// Incomplete struct pointer used as function argument.
-void f7() {
+void f7(void) {
struct s2 *p = __builtin_alloca(10);
g2(p);
}
// sizeof() is unsigned while -1 is signed in array index.
-void f8() {
+void f8(void) {
int a[10];
a[sizeof(a)/sizeof(int) - 1] = 1; // no-warning
}
// Initialization of struct array elements.
-void f9() {
+void f9(void) {
struct s a[10];
}
// Initializing array with string literal.
-void f10() {
+void f10(void) {
char a1[4] = "abc";
char a3[6] = "abc";
}
// Retrieve the default value of element/field region.
-void f11() {
+void f11(void) {
struct s a;
g1(&a);
if (a.data == 0) // no-warning
@@ -129,25 +129,25 @@ struct s3 {
static struct s3 opt;
// Test if the embedded array is retrieved correctly.
-void f14() {
+void f14(void) {
struct s3 my_opt = opt;
}
void bar(int*);
-struct s3 gets3() {
+struct s3 gets3(void) {
struct s3 s;
return s;
}
-void accessArrayFieldNoCrash() {
+void accessArrayFieldNoCrash(void) {
bar(gets3().a);
bar((gets3().a));
bar(((gets3().a)));
}
// Test if the array is correctly invalidated.
-void f15() {
+void f15(void) {
int a[10];
bar(a);
if (a[1]) // no-warning
@@ -167,7 +167,7 @@ void f16(struct s3 *p) {
void inv(struct s1 *);
// Invalidate the struct field.
-void f17() {
+void f17(void) {
struct s1 t;
int x;
inv(&t);
@@ -177,7 +177,7 @@ void f17() {
void read(char*);
-void f18() {
+void f18(void) {
char *q;
char *p = (char *) __builtin_alloca(10);
read(p);
diff --git a/clang/test/Analysis/assume-controlled-environment.c b/clang/test/Analysis/assume-controlled-environment.c
index 749b8198f6fb..fce1a1e7bae3 100644
--- a/clang/test/Analysis/assume-controlled-environment.c
+++ b/clang/test/Analysis/assume-controlled-environment.c
@@ -16,7 +16,7 @@
char *getenv(const char *name);
-void foo() {
+void foo(void) {
char *p = getenv("FOO"); // untrusted-env-warning {{tainted}}
(void)p; // untrusted-env-warning {{tainted}}
}
diff --git a/clang/test/Analysis/blocks-no-inline.c b/clang/test/Analysis/blocks-no-inline.c
index 9fa3138ec277..78af92cb4994 100644
--- a/clang/test/Analysis/blocks-no-inline.c
+++ b/clang/test/Analysis/blocks-no-inline.c
@@ -3,7 +3,7 @@
void clang_analyzer_eval(int);
-void testInvalidation() {
+void testInvalidation(void) {
__block int i = 0;
^{
++i;
@@ -15,7 +15,7 @@ void testInvalidation() {
const int globalConstant = 1;
-void testCapturedConstants() {
+void testCapturedConstants(void) {
const int localConstant = 2;
static const int staticConstant = 3;
@@ -28,7 +28,7 @@ void testCapturedConstants() {
typedef const int constInt;
constInt anotherGlobalConstant = 1;
-void testCapturedConstantsTypedef() {
+void testCapturedConstantsTypedef(void) {
constInt localConstant = 2;
static constInt staticConstant = 3;
diff --git a/clang/test/Analysis/blocks-nrvo.c b/clang/test/Analysis/blocks-nrvo.c
index bb0be869ee76..89b7fd39577b 100644
--- a/clang/test/Analysis/blocks-nrvo.c
+++ b/clang/test/Analysis/blocks-nrvo.c
@@ -6,7 +6,7 @@ typedef struct {
int x;
} S;
-void foo() {
+void foo(void) {
^{
S s;
return s; // no-crash
diff --git a/clang/test/Analysis/blocks.m b/clang/test/Analysis/blocks.m
index a21a605ffa61..01b0ce6f02b9 100644
--- a/clang/test/Analysis/blocks.m
+++ b/clang/test/Analysis/blocks.m
@@ -82,27 +82,27 @@ void test1(NSString *format, ...) {
// test2 - Test that captured variables that are uninitialized are flagged
// as such.
-void test2() {
+void test2(void) {
static int y = 0;
int x;
^{ y = x + 1; }(); // expected-warning{{Variable 'x' is uninitialized when captured by block}}
}
-void test2_b() {
+void test2_b(void) {
static int y = 0;
__block int x;
^{ y = x + 1; }(); // expected-warning {{left operand of '+' is a garbage value}}
}
-void test2_c() {
+void test2_c(void) {
typedef void (^myblock)(void);
- myblock f = ^() { f(); }; // expected-warning{{Variable 'f' is uninitialized when captured by block}}
+ myblock f = ^(void) { f(); }; // expected-warning{{Variable 'f' is uninitialized when captured by block}}
}
-void testMessaging() {
+void testMessaging(void) {
// <rdar://problem/12119814>
- [[^(){} copy] release];
+ [[^(void){} copy] release];
}
@@ -133,8 +133,8 @@ void testMessaging() {
}
@end
-void testReturnVariousSignatures() {
- (void)^int(){
+void testReturnVariousSignatures(void) {
+ (void)^int(void){
return 42;
}();
@@ -142,7 +142,7 @@ void testReturnVariousSignatures() {
return 42;
}();
- (void)^(){
+ (void)^(void){
return 42;
}();
@@ -173,7 +173,7 @@ void blockCapturesItselfInTheLoop(int x, int m) {
void takeNonnullBlock(void (^)(void)) __attribute__((nonnull));
void takeNonnullIntBlock(int (^)(void)) __attribute__((nonnull));
-void testCallContainingWithSignature1()
+void testCallContainingWithSignature1(void)
{
takeNonnullBlock(^{
static const char str[] = "Lost connection to sharingd";
@@ -181,7 +181,7 @@ void testCallContainingWithSignature1()
});
}
-void testCallContainingWithSignature2()
+void testCallContainingWithSignature2(void)
{
takeNonnullBlock(^void{
static const char str[] = "Lost connection to sharingd";
@@ -189,15 +189,15 @@ void testCallContainingWithSignature2()
});
}
-void testCallContainingWithSignature3()
+void testCallContainingWithSignature3(void)
{
- takeNonnullBlock(^void(){
+ takeNonnullBlock(^void(void){
static const char str[] = "Lost connection to sharingd";
testCallContainingWithSignature3();
});
}
-void testCallContainingWithSignature4()
+void testCallContainingWithSignature4(void)
{
takeNonnullBlock(^void(void){
static const char str[] = "Lost connection to sharingd";
@@ -205,7 +205,7 @@ void testCallContainingWithSignature4()
});
}
-void testCallContainingWithSignature5()
+void testCallContainingWithSignature5(void)
{
takeNonnullIntBlock(^{
static const char str[] = "Lost connection to sharingd";
@@ -240,13 +240,13 @@ __attribute__((objc_root_class))
// The incorrect block variable initialization below is a hard compile-time
// error in C++.
#if !defined(__cplusplus)
-void call_block_with_fewer_arguments() {
+void call_block_with_fewer_arguments(void) {
void (^b)() = ^(int a) { };
b(); // expected-warning {{Block taking 1 argument is called with fewer (0)}}
}
#endif
-int getBlockFlags() {
+int getBlockFlags(void) {
int x = 0;
return ((struct Block_layout *)^{ (void)x; })->flags; // no-warning
}
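In blocks.m the same rule is applied to Objective-C block literals and block pointers: ^(){ } becomes ^(void){ }, while a block pointer declared as void (^)() leaves its arguments unchecked, which is exactly what the call_block_with_fewer_arguments test relies on. A short sketch, illustrative only and not part of the patch; it compiles with clang -fblocks (linking needs a blocks runtime on non-Darwin platforms):

/* Block types follow the C function-type rules: an empty parameter list
 * means "unspecified", an explicit (void) means "takes no arguments". */
typedef void (^UncheckedBlock)();   /* unspecified parameters            */
typedef void (^NoArgBlock)(void);   /* prototype: no arguments allowed   */

static void demo(void) {
  NoArgBlock b = ^(void) { /* body */ };
  b();        /* checked against the (void) signature                    */
  /* b(1); would not compile; an UncheckedBlock could be called either way. */
}

int main(void) {
  demo();
  return 0;
}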
diff --git a/clang/test/Analysis/bsd-string.c b/clang/test/Analysis/bsd-string.c
index e4119058507f..a7d06e71e978 100644
--- a/clang/test/Analysis/bsd-string.c
+++ b/clang/test/Analysis/bsd-string.c
@@ -12,12 +12,12 @@ size_t strlcat(char *dst, const char *src, size_t n);
size_t strlen(const char *s);
void clang_analyzer_eval(int);
-void f1() {
+void f1(void) {
char overlap[] = "123456789";
strlcpy(overlap, overlap + 1, 3); // expected-warning{{Arguments must not be overlapping buffers}}
}
-void f2() {
+void f2(void) {
char buf[5];
size_t len;
len = strlcpy(buf, "abcd", sizeof(buf)); // expected-no-warning
@@ -26,33 +26,33 @@ void f2() {
clang_analyzer_eval(len == 8); // expected-warning{{TRUE}}
}
-void f3() {
+void f3(void) {
char dst[2];
const char *src = "abdef";
strlcpy(dst, src, 5); // expected-warning{{String copy function overflows the destination buffer}}
}
-void f4() {
+void f4(void) {
strlcpy(NULL, "abcdef", 6); // expected-warning{{Null pointer passed as 1st argument to string copy function}}
}
-void f5() {
+void f5(void) {
strlcat(NULL, "abcdef", 6); // expected-warning{{Null pointer passed as 1st argument to string concatenation function}}
}
-void f6() {
+void f6(void) {
char buf[8];
strlcpy(buf, "abc", 3);
size_t len = strlcat(buf, "defg", 4);
clang_analyzer_eval(len == 7); // expected-warning{{TRUE}}
}
-int f7() {
+int f7(void) {
char buf[8];
return strlcpy(buf, "1234567", 0); // no-crash
}
-void f8(){
+void f8(void){
char buf[5];
size_t len;
@@ -116,7 +116,7 @@ void f9(int unknown_size, char* unknown_src, char* unknown_dst){
// expected-warning@-1 {{String concatenation function overflows the destination buffer}}
}
-void f10(){
+void f10(void){
char buf[8];
size_t len;
@@ -126,7 +126,7 @@ void f10(){
// expected-warning@-1 {{String concatenation function overflows the destination buffer}}
}
-void f11() {
+void f11(void) {
//test for Bug 41729
char a[256], b[256];
strlcpy(a, "world", sizeof(a));
@@ -135,7 +135,7 @@ void f11() {
}
int a, b;
-void unknown_val_crash() {
+void unknown_val_crash(void) {
// We're unable to evaluate the integer-to-pointer cast.
strlcat(&b, a, 0); // no-crash
}
diff --git a/clang/test/Analysis/bstring.c b/clang/test/Analysis/bstring.c
index 0deb4754c3b2..c88452e49075 100644
--- a/clang/test/Analysis/bstring.c
+++ b/clang/test/Analysis/bstring.c
@@ -71,7 +71,7 @@ void *memcpy(void *restrict s1, const void *restrict s2, size_t n);
#endif /* VARIANT */
-void memcpy0 () {
+void memcpy0 (void) {
char src[] = {1, 2, 3, 4};
char dst[4] = {0};
@@ -84,14 +84,14 @@ void memcpy0 () {
clang_analyzer_eval(dst[0] != 0); // expected-warning{{UNKNOWN}}
}
-void memcpy1 () {
+void memcpy1 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
memcpy(dst, src, 5); // expected-warning{{Memory copy function accesses out-of-bound array element}}
}
-void memcpy2 () {
+void memcpy2 (void) {
char src[] = {1, 2, 3, 4};
char dst[1];
@@ -101,21 +101,21 @@ void memcpy2 () {
#endif
}
-void memcpy3 () {
+void memcpy3 (void) {
char src[] = {1, 2, 3, 4};
char dst[3];
memcpy(dst+1, src+2, 2); // no-warning
}
-void memcpy4 () {
+void memcpy4 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
memcpy(dst+2, src+2, 3); // expected-warning{{Memory copy function accesses out-of-bound array element}}
}
-void memcpy5() {
+void memcpy5(void) {
char src[] = {1, 2, 3, 4};
char dst[3];
@@ -125,43 +125,43 @@ void memcpy5() {
#endif
}
-void memcpy6() {
+void memcpy6(void) {
int a[4] = {0};
memcpy(a, a, 8); // expected-warning{{overlapping}}
}
-void memcpy7() {
+void memcpy7(void) {
int a[4] = {0};
memcpy(a+2, a+1, 8); // expected-warning{{overlapping}}
}
-void memcpy8() {
+void memcpy8(void) {
int a[4] = {0};
memcpy(a+1, a+2, 8); // expected-warning{{overlapping}}
}
-void memcpy9() {
+void memcpy9(void) {
int a[4] = {0};
memcpy(a+2, a+1, 4); // no-warning
memcpy(a+1, a+2, 4); // no-warning
}
-void memcpy10() {
+void memcpy10(void) {
char a[4] = {0};
memcpy(0, a, 4); // expected-warning{{Null pointer passed as 1st argument to memory copy function}}
}
-void memcpy11() {
+void memcpy11(void) {
char a[4] = {0};
memcpy(a, 0, 4); // expected-warning{{Null pointer passed as 2nd argument to memory copy function}}
}
-void memcpy12() {
+void memcpy12(void) {
char a[4] = {0};
memcpy(0, a, 0); // no-warning
}
-void memcpy13() {
+void memcpy13(void) {
char a[4] = {0};
memcpy(a, 0, 0); // no-warning
}
@@ -197,7 +197,7 @@ void *mempcpy(void *restrict s1, const void *restrict s2, size_t n);
#endif /* VARIANT */
-void mempcpy0 () {
+void mempcpy0 (void) {
char src[] = {1, 2, 3, 4};
char dst[5] = {0};
@@ -210,14 +210,14 @@ void mempcpy0 () {
clang_analyzer_eval(dst[0] != 0); // expected-warning{{UNKNOWN}}
}
-void mempcpy1 () {
+void mempcpy1 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
mempcpy(dst, src, 5); // expected-warning{{Memory copy function accesses out-of-bound array element}}
}
-void mempcpy2 () {
+void mempcpy2 (void) {
char src[] = {1, 2, 3, 4};
char dst[1];
@@ -227,21 +227,21 @@ void mempcpy2 () {
#endif
}
-void mempcpy3 () {
+void mempcpy3 (void) {
char src[] = {1, 2, 3, 4};
char dst[3];
mempcpy(dst+1, src+2, 2); // no-warning
}
-void mempcpy4 () {
+void mempcpy4 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
mempcpy(dst+2, src+2, 3); // expected-warning{{Memory copy function accesses out-of-bound array element}}
}
-void mempcpy5() {
+void mempcpy5(void) {
char src[] = {1, 2, 3, 4};
char dst[3];
@@ -251,48 +251,48 @@ void mempcpy5() {
#endif
}
-void mempcpy6() {
+void mempcpy6(void) {
int a[4] = {0};
mempcpy(a, a, 8); // expected-warning{{overlapping}}
}
-void mempcpy7() {
+void mempcpy7(void) {
int a[4] = {0};
mempcpy(a+2, a+1, 8); // expected-warning{{overlapping}}
}
-void mempcpy8() {
+void mempcpy8(void) {
int a[4] = {0};
mempcpy(a+1, a+2, 8); // expected-warning{{overlapping}}
}
-void mempcpy9() {
+void mempcpy9(void) {
int a[4] = {0};
mempcpy(a+2, a+1, 4); // no-warning
mempcpy(a+1, a+2, 4); // no-warning
}
-void mempcpy10() {
+void mempcpy10(void) {
char a[4] = {0};
mempcpy(0, a, 4); // expected-warning{{Null pointer passed as 1st argument to memory copy function}}
}
-void mempcpy11() {
+void mempcpy11(void) {
char a[4] = {0};
mempcpy(a, 0, 4); // expected-warning{{Null pointer passed as 2nd argument to memory copy function}}
}
-void mempcpy12() {
+void mempcpy12(void) {
char a[4] = {0};
mempcpy(0, a, 0); // no-warning
}
-void mempcpy13() {
+void mempcpy13(void) {
char a[4] = {0};
mempcpy(a, 0, 0); // no-warning
}
-void mempcpy14() {
+void mempcpy14(void) {
int src[] = {1, 2, 3, 4};
int dst[5] = {0};
int *p;
@@ -307,7 +307,7 @@ struct st {
int j;
};
-void mempcpy15() {
+void mempcpy15(void) {
struct st s1 = {0};
struct st s2;
struct st *p1;
@@ -319,7 +319,7 @@ void mempcpy15() {
clang_analyzer_eval(p1 == p2); // expected-warning{{TRUE}}
}
-void mempcpy16() {
+void mempcpy16(void) {
struct st s1[10] = {{0}};
struct st s2[10];
struct st *p1;
@@ -362,7 +362,7 @@ void *memmove(void *s1, const void *s2, size_t n);
#endif /* VARIANT */
-void memmove0 () {
+void memmove0 (void) {
char src[] = {1, 2, 3, 4};
char dst[4] = {0};
@@ -375,14 +375,14 @@ void memmove0 () {
clang_analyzer_eval(dst[0] != 0); // expected-warning{{UNKNOWN}}
}
-void memmove1 () {
+void memmove1 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
memmove(dst, src, 5); // expected-warning{{out-of-bound}}
}
-void memmove2 () {
+void memmove2 (void) {
char src[] = {1, 2, 3, 4};
char dst[1];
@@ -410,28 +410,28 @@ int memcmp(const void *s1, const void *s2, size_t n);
#endif /* VARIANT */
-void memcmp0 () {
+void memcmp0 (void) {
char a[] = {1, 2, 3, 4};
char b[4] = { 0 };
memcmp(a, b, 4); // no-warning
}
-void memcmp1 () {
+void memcmp1 (void) {
char a[] = {1, 2, 3, 4};
char b[10] = { 0 };
memcmp(a, b, 5); // expected-warning{{out-of-bound}}
}
-void memcmp2 () {
+void memcmp2 (void) {
char a[] = {1, 2, 3, 4};
char b[1] = { 0 };
memcmp(a, b, 4); // expected-warning{{out-of-bound}}
}
-void memcmp3 () {
+void memcmp3 (void) {
char a[] = {1, 2, 3, 4};
clang_analyzer_eval(memcmp(a, a, 4) == 0); // expected-warning{{TRUE}}
@@ -483,7 +483,7 @@ int memcmp8(char *a, size_t n) {
void bcopy(/*const*/ void *s1, void *s2, size_t n);
-void bcopy0 () {
+void bcopy0 (void) {
char src[] = {1, 2, 3, 4};
char dst[4] = {0};
@@ -494,14 +494,14 @@ void bcopy0 () {
clang_analyzer_eval(dst[0] != 0); // expected-warning{{UNKNOWN}}
}
-void bcopy1 () {
+void bcopy1 (void) {
char src[] = {1, 2, 3, 4};
char dst[10];
bcopy(src, dst, 5); // expected-warning{{out-of-bound}}
}
-void bcopy2 () {
+void bcopy2 (void) {
char src[] = {1, 2, 3, 4};
char dst[1];
diff --git a/clang/test/Analysis/bug_hash_test.m b/clang/test/Analysis/bug_hash_test.m
index fbb70e5d626a..1510c12a0ab8 100644
--- a/clang/test/Analysis/bug_hash_test.m
+++ b/clang/test/Analysis/bug_hash_test.m
@@ -28,7 +28,7 @@ __attribute__((objc_root_class))
@end
-void testBlocks() {
+void testBlocks(void) {
int x = 5;
^{
clang_analyzer_hashDump(x); // expected-warning {{debug.ExprInspection$$29$clang_analyzer_hashDump(x);$Category}}
diff --git a/clang/test/Analysis/c11lock.c b/clang/test/Analysis/c11lock.c
index 78e62982fea1..0e867e9dada3 100644
--- a/clang/test/Analysis/c11lock.c
+++ b/clang/test/Analysis/c11lock.c
@@ -52,7 +52,7 @@ void bad5(void) {
mtx_unlock(&mtx1); // expected-warning {{This lock has already been unlocked}}
}
-void bad6() {
+void bad6(void) {
mtx_init(&mtx1, 0);
if (mtx_trylock(&mtx1) != thrd_success)
mtx_unlock(&mtx1); // expected-warning {{This lock has already been unlocked}}
@@ -65,7 +65,7 @@ void bad7(void) {
mtx_unlock(&mtx2);
}
-void good() {
+void good(void) {
mtx_t mtx;
mtx_init(&mtx, 0);
mtx_lock(&mtx);
@@ -73,7 +73,7 @@ void good() {
mtx_destroy(&mtx);
}
-void good2() {
+void good2(void) {
mtx_t mtx;
mtx_init(&mtx, 0);
if (mtx_trylock(&mtx) == thrd_success)
@@ -81,7 +81,7 @@ void good2() {
mtx_destroy(&mtx);
}
-void good3() {
+void good3(void) {
mtx_t mtx;
mtx_init(&mtx, 0);
if (mtx_timedlock(&mtx, 0) == thrd_success)
diff --git a/clang/test/Analysis/call-and-message.c b/clang/test/Analysis/call-and-message.c
index da62a6f5071a..b79ec8c344b6 100644
--- a/clang/test/Analysis/call-and-message.c
+++ b/clang/test/Analysis/call-and-message.c
@@ -11,7 +11,7 @@
// no-pointee-no-diagnostics
void doStuff_pointerToConstInt(const int *u){};
-void pointee_uninit() {
+void pointee_uninit(void) {
int i;
int *p = &i;
doStuff_pointerToConstInt(p); // expected-warning{{1st function call argument is a pointer to uninitialized value [core.CallAndMessage]}}
diff --git a/clang/test/Analysis/call-and-message.m b/clang/test/Analysis/call-and-message.m
index cef501400b72..b90ef571136c 100644
--- a/clang/test/Analysis/call-and-message.m
+++ b/clang/test/Analysis/call-and-message.m
@@ -62,7 +62,7 @@ extern NSString *const NSUndoManagerCheckpointNotification;
// Test cases.
//===----------------------------------------------------------------------===//
-unsigned f1() {
+unsigned f1(void) {
NSString *aString;
return [aString length]; // expected-warning {{Receiver in message expression is an uninitialized value [core.CallAndMessage]}}
}
diff --git a/clang/test/Analysis/casts.c b/clang/test/Analysis/casts.c
index ce195297874b..28667b6a043a 100644
--- a/clang/test/Analysis/casts.c
+++ b/clang/test/Analysis/casts.c
@@ -91,7 +91,7 @@ int foo (int* p) {
return 0;
}
-void castsToBool() {
+void castsToBool(void) {
clang_analyzer_eval(0); // expected-warning{{FALSE}}
clang_analyzer_eval(0U); // expected-warning{{FALSE}}
clang_analyzer_eval((void *)0); // expected-warning{{FALSE}}
@@ -128,7 +128,7 @@ void locAsIntegerCasts(void *p) {
clang_analyzer_eval(++x < 10); // no-crash // expected-warning{{UNKNOWN}}
}
-void multiDimensionalArrayPointerCasts() {
+void multiDimensionalArrayPointerCasts(void) {
static int x[10][10];
int *y1 = &(x[3][5]);
char *z = ((char *) y1) + 2;
@@ -154,15 +154,15 @@ void multiDimensionalArrayPointerCasts() {
clang_analyzer_eval(*((char *)y1) == *((char *) y3)); // expected-warning{{TRUE}}
}
-void *getVoidPtr();
+void *getVoidPtr(void);
-void testCastVoidPtrToIntPtrThroughIntTypedAssignment() {
+void testCastVoidPtrToIntPtrThroughIntTypedAssignment(void) {
int *x;
(*((int *)(&x))) = (int)getVoidPtr();
*x = 1; // no-crash
}
-void testCastUIntPtrToIntPtrThroughIntTypedAssignment() {
+void testCastUIntPtrToIntPtrThroughIntTypedAssignment(void) {
unsigned u;
int *x;
(*((int *)(&x))) = (int)&u;
@@ -170,7 +170,7 @@ void testCastUIntPtrToIntPtrThroughIntTypedAssignment() {
clang_analyzer_eval(u == 1); // expected-warning{{TRUE}}
}
-void testCastVoidPtrToIntPtrThroughUIntTypedAssignment() {
+void testCastVoidPtrToIntPtrThroughUIntTypedAssignment(void) {
int *x;
(*((int *)(&x))) = (int)(unsigned *)getVoidPtr();
*x = 1; // no-crash
@@ -187,7 +187,7 @@ void testLocNonLocSymbolRemainder(int a, int *b) {
}
}
-void testSwitchWithSizeofs() {
+void testSwitchWithSizeofs(void) {
switch (sizeof(char) == 1) { // expected-warning{{switch condition has boolean value}}
case sizeof(char):; // no-crash
}
@@ -219,8 +219,8 @@ void test_VectorSplat_cast(long x) {
#ifdef EAGERLY_ASSUME
int globalA;
-extern int globalFunc();
-void no_crash_on_symsym_cast_to_long() {
+extern int globalFunc(void);
+void no_crash_on_symsym_cast_to_long(void) {
char c = globalFunc() - 5;
c == 0;
globalA -= c;
@@ -240,7 +240,7 @@ char no_crash_SymbolCast_of_float_type_aux(int *p) {
return *p;
}
-void no_crash_SymbolCast_of_float_type() {
+void no_crash_SymbolCast_of_float_type(void) {
extern float x;
char (*f)() = no_crash_SymbolCast_of_float_type_aux;
f(&x);
diff --git a/clang/test/Analysis/casts.m b/clang/test/Analysis/casts.m
index 5c81ae6ffbe6..eda26c68d017 100644
--- a/clang/test/Analysis/casts.m
+++ b/clang/test/Analysis/casts.m
@@ -41,6 +41,6 @@ adium_media_ready_cb(RDR10087620 *InObj)
// PR16690
-_Bool testLocAsIntegerToBool() {
+_Bool testLocAsIntegerToBool(void) {
return (long long)&testLocAsIntegerToBool;
}
diff --git a/clang/test/Analysis/cert/env34-c.c b/clang/test/Analysis/cert/env34-c.c
index b3b14b0e7c42..3c1a3930d637 100644
--- a/clang/test/Analysis/cert/env34-c.c
+++ b/clang/test/Analysis/cert/env34-c.c
@@ -18,10 +18,10 @@ char *asctime(const tm *timeptr);
int strcmp(const char*, const char*);
extern void foo(char *e);
-extern char* bar();
+extern char* bar(void);
-void getenv_test1() {
+void getenv_test1(void) {
char *p;
p = getenv("VAR");
@@ -31,7 +31,7 @@ void getenv_test1() {
*p; // no-warning, getenv result was assigned to the same pointer
}
-void getenv_test2() {
+void getenv_test2(void) {
char *p, *p2;
p = getenv("VAR");
@@ -46,7 +46,7 @@ void getenv_test2() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test3() {
+void getenv_test3(void) {
char *p, *p2, *p3;
p = getenv("VAR");
@@ -64,7 +64,7 @@ void getenv_test3() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test4() {
+void getenv_test4(void) {
char *p, *p2, *p3;
p = getenv("VAR");
@@ -78,7 +78,7 @@ void getenv_test4() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test5() {
+void getenv_test5(void) {
char *p, *p2, *p3;
p = getenv("VAR");
@@ -92,7 +92,7 @@ void getenv_test5() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test6() {
+void getenv_test6(void) {
char *p, *p2;
p = getenv("VAR");
*p; // no-warning
@@ -120,7 +120,7 @@ void getenv_test6() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test7() {
+void getenv_test7(void) {
char *p, *p2;
p = getenv("VAR");
// expected-note@-1{{previous function call was here}}
@@ -134,7 +134,7 @@ void getenv_test7() {
// expected-note@-2{{use of invalidated pointer 'p' in a function call}}
}
-void getenv_test8() {
+void getenv_test8(void) {
static const char *array[] = {
0,
0,
@@ -159,7 +159,7 @@ void getenv_test8() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test9() {
+void getenv_test9(void) {
char *p, *p2;
p = getenv("something");
p = bar();
@@ -167,7 +167,7 @@ void getenv_test9() {
*p; // no-warning: p does not point to getenv anymore
}
-void getenv_test10() {
+void getenv_test10(void) {
strcmp(getenv("VAR1"), getenv("VAR2"));
// expected-note@-1{{'getenv' call may invalidate the the result of the previous 'getenv'}}
// expected-note@-2{{previous function call was here}}
@@ -181,7 +181,7 @@ void dereference_pointer(char* a) {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void getenv_test11() {
+void getenv_test11(void) {
char *p = getenv("VAR");
// expected-note@-1{{previous function call was here}}
@@ -212,7 +212,7 @@ void getenv_test12(int flag1, int flag2) {
}
}
-void setlocale_test1() {
+void setlocale_test1(void) {
char *p, *p2;
p = setlocale(0, "VAR");
*p; // no-warning
@@ -250,7 +250,7 @@ void setlocale_test2(int flag) {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void strerror_test1() {
+void strerror_test1(void) {
char *p, *p2;
p = strerror(0);
@@ -298,7 +298,7 @@ void strerror_test2(int errno) {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void asctime_test() {
+void asctime_test(void) {
const tm *t;
const tm *tt;
@@ -312,7 +312,7 @@ void asctime_test() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void localeconv_test1() {
+void localeconv_test1(void) {
lconv *lc1 = localeconv();
// expected-note@-1{{previous function call was here}}
lconv *lc2 = localeconv();
@@ -323,7 +323,7 @@ void localeconv_test1() {
// expected-note@-2{{dereferencing an invalid pointer}}
}
-void localeconv_test2() {
+void localeconv_test2(void) {
// TODO: false negative
lconv *lc1 = localeconv();
lconv *lc2 = localeconv();
diff --git a/clang/test/Analysis/cfg.c b/clang/test/Analysis/cfg.c
index 429406252538..4bd84e689f25 100644
--- a/clang/test/Analysis/cfg.c
+++ b/clang/test/Analysis/cfg.c
@@ -50,7 +50,7 @@ void checkWrap(int i) {
// CHECK-NEXT: 4: asm ("" : "=r" ([B1.3]));
// CHECK-NEXT: 5: arg
// CHECK-NEXT: 6: asm ("" : "=r" ([B1.5]));
-void checkGCCAsmRValueOutput() {
+void checkGCCAsmRValueOutput(void) {
int arg;
__asm__("" : "=r"((int)arg)); // rvalue output operand
__asm__("" : "=r"(arg)); // lvalue output operand
diff --git a/clang/test/Analysis/class-object-state-dump.m b/clang/test/Analysis/class-object-state-dump.m
index 66519b82adb1..740e517b8302 100644
--- a/clang/test/Analysis/class-object-state-dump.m
+++ b/clang/test/Analysis/class-object-state-dump.m
@@ -3,7 +3,7 @@
// expected-no-diagnostics
-void clang_analyzer_printState();
+void clang_analyzer_printState(void);
@interface NSObject {
}
diff --git a/clang/test/Analysis/compound-literals.c b/clang/test/Analysis/compound-literals.c
index 42e6a55a30c7..c74eacc3aa68 100644
--- a/clang/test/Analysis/compound-literals.c
+++ b/clang/test/Analysis/compound-literals.c
@@ -11,7 +11,7 @@ void foo(void) {
}
// check that we propagate info through compound literal regions
-void bar() {
+void bar(void) {
int *integers = (int[]){1, 2, 3};
clang_analyzer_eval(integers[0] == 1); // expected-warning{{TRUE}}
clang_analyzer_eval(integers[1] == 2); // expected-warning{{TRUE}}
diff --git a/clang/test/Analysis/concrete-address.c b/clang/test/Analysis/concrete-address.c
index f1608f8a801c..fe0de4a1ff25 100644
--- a/clang/test/Analysis/concrete-address.c
+++ b/clang/test/Analysis/concrete-address.c
@@ -1,7 +1,7 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core,alpha.core -analyzer-store=region -verify %s
// expected-no-diagnostics
-void foo() {
+void foo(void) {
int *p = (int*) 0x10000; // Should not crash here.
*p = 3;
}
diff --git a/clang/test/Analysis/constant-folding.c b/clang/test/Analysis/constant-folding.c
index 116e74b746b4..5de4f0ae3cd3 100644
--- a/clang/test/Analysis/constant-folding.c
+++ b/clang/test/Analysis/constant-folding.c
@@ -179,7 +179,7 @@ void testBitwiseRules(unsigned int a, int b, int c) {
}
}
-unsigned reset();
+unsigned reset(void);
void testCombinedSources(unsigned a, unsigned b) {
if (b >= 10 && (a | b) <= 30) {
diff --git a/clang/test/Analysis/constraint-assignor.c b/clang/test/Analysis/constraint-assignor.c
index 1b9e40e6bf64..8210e576c98b 100644
--- a/clang/test/Analysis/constraint-assignor.c
+++ b/clang/test/Analysis/constraint-assignor.c
@@ -3,7 +3,7 @@
// RUN: -analyzer-checker=debug.ExprInspection \
// RUN: -verify
-void clang_analyzer_warnIfReached();
+void clang_analyzer_warnIfReached(void);
void clang_analyzer_eval(int);
void rem_constant_rhs_ne_zero(int x, int y) {
diff --git a/clang/test/Analysis/conversion-tracking-notes.c b/clang/test/Analysis/conversion-tracking-notes.c
index 94b3dc1c8bc4..d9db5e99200b 100644
--- a/clang/test/Analysis/conversion-tracking-notes.c
+++ b/clang/test/Analysis/conversion-tracking-notes.c
@@ -7,7 +7,7 @@
unsigned char U8;
signed char S8;
-void track_assign() {
+void track_assign(void) {
unsigned long L = 1000; // expected-note {{'L' initialized to 1000}}
int I = -1; // expected-note {{'I' initialized to -1}}
U8 *= L; // expected-warning {{Loss of precision in implicit conversion}}
diff --git a/clang/test/Analysis/conversion.c b/clang/test/Analysis/conversion.c
index f6b0c11a15ae..78b614516b63 100644
--- a/clang/test/Analysis/conversion.c
+++ b/clang/test/Analysis/conversion.c
@@ -17,21 +17,21 @@ void assign(unsigned U, signed S) {
S8 = U; // no-warning
}
-void addAssign() {
+void addAssign(void) {
unsigned long L = 1000;
int I = -100;
U8 += L; // expected-warning {{Loss of precision in implicit conversion}}
L += I; // no-warning
}
-void subAssign() {
+void subAssign(void) {
unsigned long L = 1000;
int I = -100;
U8 -= L; // expected-warning {{Loss of precision in implicit conversion}}
L -= I; // no-warning
}
-void mulAssign() {
+void mulAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 *= L; // expected-warning {{Loss of precision in implicit conversion}}
@@ -40,42 +40,42 @@ void mulAssign() {
L *= I; // no-warning
}
-void divAssign() {
+void divAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 /= L; // no-warning
L /= I; // expected-warning {{Loss of sign in implicit conversion}}
}
-void remAssign() {
+void remAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 %= L; // no-warning
L %= I; // expected-warning {{Loss of sign in implicit conversion}}
}
-void andAssign() {
+void andAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 &= L; // no-warning
L &= I; // expected-warning {{Loss of sign in implicit conversion}}
}
-void orAssign() {
+void orAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 |= L; // expected-warning {{Loss of precision in implicit conversion}}
L |= I; // expected-warning {{Loss of sign in implicit conversion}}
}
-void xorAssign() {
+void xorAssign(void) {
unsigned long L = 1000;
int I = -1;
U8 ^= L; // expected-warning {{Loss of precision in implicit conversion}}
L ^= I; // expected-warning {{Loss of sign in implicit conversion}}
}
-void init1() {
+void init1(void) {
long long A = 1LL << 60;
short X = A; // expected-warning {{Loss of precision in implicit conversion}}
}
@@ -108,7 +108,7 @@ void division(unsigned U, signed S) {
void f(unsigned x) {}
void g(unsigned x) {}
-void functioncall1() {
+void functioncall1(void) {
long x = -1;
int y = 0;
f(x); // expected-warning {{Loss of sign in implicit conversion}}
@@ -145,11 +145,11 @@ void dontwarn3(int X) {
// don't warn for macros
#define DOSTUFF ({ unsigned X = 1000; U8 = X; })
-void dontwarn4() {
+void dontwarn4(void) {
DOSTUFF;
}
-void dontwarn5() {
+void dontwarn5(void) {
unsigned char c1 = 'A';
c1 = (c1 >= 'A' && c1 <= 'Z') ? c1 - 'A' + 'a' : c1;
unsigned char c2 = 0;
@@ -162,7 +162,7 @@ void dontwarn5() {
c5 = (c5 >= 'A' && c5 <= 'Z') ? c5 - 'A' + 'a' : c5;
}
-void dontwarn6() {
+void dontwarn6(void) {
int x = ~0;
unsigned y = ~0;
}
@@ -172,11 +172,11 @@ void dontwarn7(unsigned x) {
}
}
-void dontwarn8() {
+void dontwarn8(void) {
unsigned x = (unsigned)-1;
}
-unsigned dontwarn9() {
+unsigned dontwarn9(void) {
return ~0;
}
@@ -190,7 +190,7 @@ char dontwarn10(long long x) {
// C library functions, handled via apiModeling.StdCLibraryFunctions
int isascii(int c);
-void libraryFunction1() {
+void libraryFunction1(void) {
char kb2[5];
int X = 1000;
if (isascii(X)) {
@@ -204,7 +204,7 @@ typedef struct FILE {} FILE; int getc(FILE *stream);
char reply_string[8192];
FILE *cin;
extern int dostuff(void);
-int libraryFunction2() {
+int libraryFunction2(void) {
int c, n;
int dig;
char *cp = reply_string;
@@ -239,7 +239,7 @@ double floating_point(long long a, int b) {
return 137;
}
-double floating_point2() {
+double floating_point2(void) {
int a = 1 << 24;
long long b = 1LL << 53;
float f = a; // no-warning
diff --git a/clang/test/Analysis/copypaste/generic.c b/clang/test/Analysis/copypaste/generic.c
index 2fa6c302da17..4c265b2d68b1 100644
--- a/clang/test/Analysis/copypaste/generic.c
+++ b/clang/test/Analysis/copypaste/generic.c
@@ -4,7 +4,7 @@
int global;
-int foo1() {
+int foo1(void) {
if (global > 0)
return 0;
else if (global < 0)
@@ -13,7 +13,7 @@ int foo1() {
}
// Different associated type (int instead of float)
-int foo2() {
+int foo2(void) {
if (global > 0)
return 0;
else if (global < 0)
@@ -22,7 +22,7 @@ int foo2() {
}
// Different number of associated types.
-int foo3() {
+int foo3(void) {
if (global > 0)
return 0;
else if (global < 0)
diff --git a/clang/test/Analysis/coverage.c b/clang/test/Analysis/coverage.c
index b819f10edc13..f0fa1d40c39d 100644
--- a/clang/test/Analysis/coverage.c
+++ b/clang/test/Analysis/coverage.c
@@ -93,7 +93,7 @@ void coverage9(int *x) {
y = (*x); // no warning
}
-static void empty_function(){
+static void empty_function(void){
}
int use_empty_function(int x) {
x = 0;
diff --git a/clang/test/Analysis/crash-trace.c b/clang/test/Analysis/crash-trace.c
index f00db3a74ab9..857b3a228d07 100644
--- a/clang/test/Analysis/crash-trace.c
+++ b/clang/test/Analysis/crash-trace.c
@@ -13,7 +13,7 @@ void inlined(int x, float y) {
clang_analyzer_crash();
}
-void test() {
+void test(void) {
inlined(0, 0);
}
diff --git a/clang/test/Analysis/cstring-plist.c b/clang/test/Analysis/cstring-plist.c
index 65fa9fe74d1d..3851a5469376 100644
--- a/clang/test/Analysis/cstring-plist.c
+++ b/clang/test/Analysis/cstring-plist.c
@@ -14,7 +14,7 @@ char *strncpy(char *restrict s1, const char *restrict s2, size_t n);
-void cstringchecker_bounds_nocrash() {
+void cstringchecker_bounds_nocrash(void) {
char *p = malloc(2);
strncpy(p, "AAA", sizeof("AAA")); // we don't expect warning as the checker is disabled
free(p);
diff --git a/clang/test/Analysis/cstring-ranges.c b/clang/test/Analysis/cstring-ranges.c
index dc6bb67e6de5..27f90cc888bd 100644
--- a/clang/test/Analysis/cstring-ranges.c
+++ b/clang/test/Analysis/cstring-ranges.c
@@ -7,7 +7,7 @@
char *strcpy(char *, const char *);
-void foo() {
+void foo(void) {
char *a = 0, *b = 0;
strcpy(a, b);
}
diff --git a/clang/test/Analysis/cstring-syntax-weird2.c b/clang/test/Analysis/cstring-syntax-weird2.c
index a0f28536d4a3..0250d69d994e 100644
--- a/clang/test/Analysis/cstring-syntax-weird2.c
+++ b/clang/test/Analysis/cstring-syntax-weird2.c
@@ -6,9 +6,9 @@
typedef __SIZE_TYPE__ size_t;
// The last parameter is normally size_t but the test is about the abnormal
// situation when it's not a size_t.
-size_t strlcpy(char *, const char *, void (*)());
+size_t strlcpy(char *, const char *, void (*)(void));
-void foo();
+void foo(void);
void testWeirdDecls(const char *src) {
char dst[10];
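The strlcpy declaration above shows the same change applied inside a function-pointer parameter: void (*)() becomes void (*)(void), so both the assignment of a callback and any indirect call through it are checked against a real prototype. A brief illustrative sketch; the names are invented for this note and are not from the patch:

#include <stdio.h>

static void tick(void) { puts("tick"); }

/* The (void) in the pointer type lets the compiler verify both that 'tick'
 * matches the parameter and that the indirect call passes no arguments. */
static void run(void (*cb)(void)) { cb(); }

int main(void) {
  run(tick);
  return 0;
}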
diff --git a/clang/test/Analysis/ctu-main.c b/clang/test/Analysis/ctu-main.c
index 1415490668ba..00ee7e414e2c 100644
--- a/clang/test/Analysis/ctu-main.c
+++ b/clang/test/Analysis/ctu-main.c
@@ -18,7 +18,7 @@ typedef struct {
} FooBar;
extern FooBar fb;
int f(int);
-void testGlobalVariable() {
+void testGlobalVariable(void) {
clang_analyzer_eval(f(5) == 1); // expected-warning{{TRUE}}
}
@@ -27,14 +27,14 @@ int enumCheck(void);
enum A { x,
y,
z };
-void testEnum() {
+void testEnum(void) {
clang_analyzer_eval(x == 0); // expected-warning{{TRUE}}
clang_analyzer_eval(enumCheck() == 42); // expected-warning{{TRUE}}
}
// Test that asm import does not fail.
-int inlineAsm();
-int testInlineAsm() {
+int inlineAsm(void);
+int testInlineAsm(void) {
return inlineAsm();
}
@@ -47,7 +47,7 @@ void testMacro(void) {
// The external function prototype is incomplete.
// warning:implicit functions are prohibited by c99
-void testImplicit() {
+void testImplicit(void) {
int res = identImplicit(6); // external implicit functions are not inlined
clang_analyzer_eval(res == 6); // expected-warning{{TRUE}}
// Call something with uninitialized from the same function in which the implicit was called.
@@ -63,7 +63,7 @@ struct DataType {
int b;
};
int structInProto(struct DataType *d);
-void testStructDefInArgument() {
+void testStructDefInArgument(void) {
struct DataType d;
d.a = 1;
d.b = 0;
diff --git a/clang/test/Analysis/dead-stores.c b/clang/test/Analysis/dead-stores.c
index 2ce94eb31b19..701e0a58b84e 100644
--- a/clang/test/Analysis/dead-stores.c
+++ b/clang/test/Analysis/dead-stores.c
@@ -12,7 +12,7 @@
// RUN: -analyzer-config deadcode.DeadStores:ShowFixIts=true \
// RUN: -verify=non-nested,nested
-void f1() {
+void f1(void) {
int k, y; // non-nested-warning {{unused variable 'k'}}
// non-nested-warning@-1 {{unused variable 'y'}}
int abc = 1;
@@ -34,8 +34,8 @@ void f2(void *b) {
// non-nested-note@-2 {{include the header <stdio.h> or explicitly provide a declaration for 'printf'}}
}
-int f();
-void f3() {
+int f(void);
+void f3(void) {
int r;
if ((r = f()) != 0) { // no-warning
int y = r; // no-warning
@@ -50,7 +50,7 @@ void f4(int k) {
k = 2; // non-nested-warning {{never read}}
}
-void f5() {
+void f5(void) {
int x = 4; // no-warning
int *p = &x; // non-nested-warning {{never read}}
// non-nested-warning@-1 {{unused variable 'p'}}
@@ -58,7 +58,7 @@ void f5() {
// CHECK-FIXES-NEXT: int *p;
}
-int f6() {
+int f6(void) {
int x = 4;
++x; // no-warning
return 1;
@@ -90,30 +90,30 @@ int f7d(int *p) {
// Warn for dead stores in nested expressions.
int f8(int *p) {
- extern int *baz();
+ extern int *baz(void);
if ((p = baz())) // nested-warning {{Although the value stored}}
return 1;
return 0;
}
-int f9() {
+int f9(void) {
int x = 4;
x = x + 10; // non-nested-warning {{never read}}
return 1;
}
-int f10() {
+int f10(void) {
int x = 4;
x = 10 + x; // non-nested-warning {{never read}}
return 1;
}
-int f11() {
+int f11(void) {
int x = 4;
return x++; // non-nested-warning {{never read}}
}
-int f11b() {
+int f11b(void) {
int x = 4;
return ((((++x)))); // no-warning
}
@@ -171,7 +171,7 @@ int f16(int x) {
}
// Self-assignments should not be flagged as dead stores.
-void f17() {
+void f17(void) {
int x = 1;
x = x;
}
@@ -180,7 +180,7 @@ void f17() {
// The values of dead stores are only "consumed" in an enclosing expression
// what that value is actually used. In other words, don't say "Although the
// value stored to 'x' is used...".
-int f18() {
+int f18(void) {
int x = 0; // no-warning
if (1)
x = 10; // non-nested-warning {{Value stored to 'x' is never read}}
@@ -193,24 +193,24 @@ int f18() {
return (x = 10); // no-warning
}
-int f18_a() {
+int f18_a(void) {
int x = 0; // no-warning
return (x = 10); // nested-warning {{Although the value stored}}
}
-void f18_b() {
+void f18_b(void) {
int x = 0; // no-warning
if (1)
x = 10; // non-nested-warning {{Value stored to 'x' is never read}}
}
-void f18_c() {
+void f18_c(void) {
int x = 0;
while (1)
x = 10; // non-nested-warning {{Value stored to 'x' is never read}}
}
-void f18_d() {
+void f18_d(void) {
int x = 0; // no-warning
do
x = 10; // non-nested-warning {{Value stored to 'x' is never read}}
@@ -238,8 +238,8 @@ void f20(void) {
#pragma unused(x)
}
-void halt() __attribute__((noreturn));
-int f21() {
+void halt(void) __attribute__((noreturn));
+int f21(void) {
int x = 4;
x = x + 1; // non-nested-warning {{never read}}
if (1) {
@@ -250,7 +250,7 @@ int f21() {
}
int j;
-void f22() {
+void f22(void) {
int x = 4;
int y1 = 4;
int y2 = 4;
@@ -473,7 +473,7 @@ int f24_D(int y) {
int f25(int y) {
__block int x = (y > 2);
__block int z = 0;
- void (^foo)() = ^{
+ void (^foo)(void) = ^{
z = x + y;
};
x = 4; // no-warning
@@ -492,7 +492,7 @@ int f25_b(int y) {
return z;
}
-int f26_nestedblocks() {
+int f26_nestedblocks(void) {
int z;
z = 1;
__block int y = 0;
@@ -508,7 +508,7 @@ int f26_nestedblocks() {
// The FOREACH macro in QT uses 'break' statements within statement expressions
// placed within the increment code of for loops.
-void rdar8014335() {
+void rdar8014335(void) {
for (int i = 0 ; i != 10 ; ({ break; })) {
for (;; ({ ++i; break; }))
;
@@ -546,7 +546,7 @@ void rdar8320674(s_rdar8320674 *z, unsigned y, s2_rdar8320674 *st, int m)
// Avoid dead stores resulting from an assignment (and use) being unreachable.
void rdar8405222_aux(int i);
-void rdar8405222() {
+void rdar8405222(void) {
const int show = 0;
int i = 0;
if (show)
@@ -557,13 +557,13 @@ void rdar8405222() {
// Look through chains of assignments, e.g.: int x = y = 0, when employing
// silencing heuristics.
-int radar11185138_foo() {
+int radar11185138_foo(void) {
int x, y;
x = y = 0; // non-nested-warning {{never read}}
return y;
}
-int rdar11185138_bar() {
+int rdar11185138_bar(void) {
int y;
int x = y = 0; // nested-warning {{Although the value stored}}
x = 2;
@@ -571,15 +571,15 @@ int rdar11185138_bar() {
return x + y;
}
-int *radar11185138_baz() {
+int *radar11185138_baz(void) {
int *x, *y;
x = y = 0; // no-warning
return y;
}
-int getInt();
-int *getPtr();
-void testBOComma() {
+int getInt(void);
+int *getPtr(void);
+void testBOComma(void) {
int x0 = (getInt(), 0); // non-nested-warning {{unused variable 'x0'}}
int x1 = (getInt(), getInt());
// non-nested-warning@-1 {{Value stored to 'x1' during its initialization is never read}}
@@ -631,7 +631,7 @@ void testBOComma() {
p = (getPtr(), (int *)0); // no warning
}
-void testVolatile() {
+void testVolatile(void) {
volatile int v;
v = 0; // no warning
}
@@ -654,7 +654,7 @@ int rdar34122265_test(int input) {
return foo.x + foo.y;
}
-void rdar34122265_test_cast() {
+void rdar34122265_test_cast(void) {
// This is allowed for defensive programming.
struct Foo foo = {0, 0};
(void)foo;
diff --git a/clang/test/Analysis/dead-stores.m b/clang/test/Analysis/dead-stores.m
index 27543ab38139..240479b8dc75 100644
--- a/clang/test/Analysis/dead-stores.m
+++ b/clang/test/Analysis/dead-stores.m
@@ -45,8 +45,8 @@ void rdar_7631278(NSObject *x) {
// This test case issuing a bogus warning for the declaration of 'isExec'
// because the compound statement for the @synchronized was being visited
// twice by the LiveVariables analysis.
-BOOL baz_rdar8527823();
-void foo_rdar8527823();
+BOOL baz_rdar8527823(void);
+void foo_rdar8527823(void);
@interface RDar8527823
- (void) bar_rbar8527823;
@end
@@ -83,9 +83,9 @@ void foo_rdar8527823();
@property (assign) int x;
@end
-RDar10591355 *rdar10591355_aux();
+RDar10591355 *rdar10591355_aux(void);
-void rdar10591355() {
+void rdar10591355(void) {
RDar10591355 *p = rdar10591355_aux();
^{ (void) p.x; }();
}
@@ -110,8 +110,8 @@ Radar11059352_1 *_Path;
}
@end
-id test_objc_precise_lifetime_foo();
-void test_objc_precise_lifetime() {
+id test_objc_precise_lifetime_foo(void);
+void test_objc_precise_lifetime(void) {
__attribute__((objc_precise_lifetime)) id dead = test_objc_precise_lifetime_foo(); // no-warning
dead = 0;
dead = test_objc_precise_lifetime_foo(); // no-warning
diff --git a/clang/test/Analysis/debug-exprinspection-istainted.c b/clang/test/Analysis/debug-exprinspection-istainted.c
index e2f6821e4aa9..8d1ebca93088 100644
--- a/clang/test/Analysis/debug-exprinspection-istainted.c
+++ b/clang/test/Analysis/debug-exprinspection-istainted.c
@@ -8,7 +8,7 @@ void clang_analyzer_isTainted(char);
void clang_analyzer_isTainted_any_suffix(char);
void clang_analyzer_isTainted_many_arguments(char, int, int);
-void foo() {
+void foo(void) {
char buf[32] = "";
clang_analyzer_isTainted(buf[0]); // expected-warning {{NO}}
clang_analyzer_isTainted_any_suffix(buf[0]); // expected-warning {{NO}}
@@ -19,7 +19,7 @@ void foo() {
int tainted_value = buf[0]; // no-warning
}
-void exactly_one_argument_required() {
+void exactly_one_argument_required(void) {
char buf[32] = "";
scanf("%s", buf);
clang_analyzer_isTainted_many_arguments(buf[0], 42, 42);
diff --git a/clang/test/Analysis/default-analyze.m b/clang/test/Analysis/default-analyze.m
index e2f7297884d7..7c3d6b99a228 100644
--- a/clang/test/Analysis/default-analyze.m
+++ b/clang/test/Analysis/default-analyze.m
@@ -52,9 +52,9 @@ static __inline__ __attribute__((always_inline)) CGFloat NSHeight(NSRect aRect)
return (aRect.size.height);
}
-NSSize rdar880566_size();
+NSSize rdar880566_size(void);
-double rdar8808566() {
+double rdar8808566(void) {
NSRect myRect;
myRect.size = rdar880566_size();
double x = NSWidth(myRect) + NSHeight(myRect); // no-warning
diff --git a/clang/test/Analysis/default-diagnostic-visitors.c b/clang/test/Analysis/default-diagnostic-visitors.c
index c8f64bc6d7f4..894684c9c504 100644
--- a/clang/test/Analysis/default-diagnostic-visitors.c
+++ b/clang/test/Analysis/default-diagnostic-visitors.c
@@ -2,7 +2,7 @@
// This file is for testing enhanced diagnostics produced by the default BugReporterVisitors.
-int getPasswordAndItem()
+int getPasswordAndItem(void)
{
int err = 0;
int *password; // expected-note {{'password' declared without an initial value}}
diff --git a/clang/test/Analysis/designated-initializer-values.c b/clang/test/Analysis/designated-initializer-values.c
index 1efc10aece60..ebe6cd979751 100644
--- a/clang/test/Analysis/designated-initializer-values.c
+++ b/clang/test/Analysis/designated-initializer-values.c
@@ -2,7 +2,7 @@
void clang_analyzer_eval(int);
-void array_init() {
+void array_init(void) {
int a[5] = {[4] = 29, [2] = 15, [0] = 4};
clang_analyzer_eval(a[0] == 4); // expected-warning{{TRUE}}
clang_analyzer_eval(a[1] == 0); // expected-warning{{TRUE}}
@@ -21,13 +21,13 @@ struct point {
int x, y;
};
-void struct_init() {
+void struct_init(void) {
struct point p = {.y = 5, .x = 3};
clang_analyzer_eval(p.x == 3); // expected-warning{{TRUE}}
clang_analyzer_eval(p.y == 5); // expected-warning{{TRUE}}
}
-void array_of_struct() {
+void array_of_struct(void) {
struct point ptarray[3] = { [2].y = 1, [2].x = 2, [0].x = 3 };
clang_analyzer_eval(ptarray[0].x == 3); // expected-warning{{TRUE}}
clang_analyzer_eval(ptarray[0].y == 0); // expected-warning{{TRUE}}
diff --git a/clang/test/Analysis/designated-initializer.c b/clang/test/Analysis/designated-initializer.c
index adca0ab6c875..aba037a3f49b 100644
--- a/clang/test/Analysis/designated-initializer.c
+++ b/clang/test/Analysis/designated-initializer.c
@@ -3,12 +3,12 @@
struct Q { int a, b, c; };
union UQ { struct Q q; };
-union UQ getUQ() {
+union UQ getUQ(void) {
union UQ u = { { 1, 2, 3 } };
return u;
}
-void test() {
+void test(void) {
struct LUQ { union UQ uq; } var = { getUQ(), .uq.q.a = 100 };
struct Q s[] = {
[0] = (struct Q){1, 2},
@@ -19,7 +19,7 @@ void test() {
// CHECK: void test()
// CHECK: [B1]
// CHECK: 1: getUQ
-// CHECK: 2: [B1.1] (ImplicitCastExpr, FunctionToPointerDecay, union UQ (*)())
+// CHECK: 2: [B1.1] (ImplicitCastExpr, FunctionToPointerDecay, union UQ (*)(void))
// CHECK: 3: [B1.2]()
// CHECK: 4: 100
// CHECK: 5: /*no init*/
diff --git a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif
index a54c4539708c..0f4794184950 100644
--- a/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif
+++ b/clang/test/Analysis/diagnostics/Inputs/expected-sarif/sarif-multi-diagnostic-test.c.sarif
@@ -4,7 +4,7 @@
{
"artifacts": [
{
- "length": 1077,
+ "length": 1081,
"location": {
},
"mimeType": "text/plain",
diff --git a/clang/test/Analysis/diagnostics/deref-track-symbolic-region.c b/clang/test/Analysis/diagnostics/deref-track-symbolic-region.c
index 597ad4fabbc4..ab1a8a814ee1 100644
--- a/clang/test/Analysis/diagnostics/deref-track-symbolic-region.c
+++ b/clang/test/Analysis/diagnostics/deref-track-symbolic-region.c
@@ -7,7 +7,7 @@ struct S {
int y;
};
-int *foo();
+int *foo(void);
void test(struct S syz, int *pp) {
int m = 0;
diff --git a/clang/test/Analysis/diagnostics/false-positive-suppression.c b/clang/test/Analysis/diagnostics/false-positive-suppression.c
index 87c04cbcdc07..248b8154dced 100644
--- a/clang/test/Analysis/diagnostics/false-positive-suppression.c
+++ b/clang/test/Analysis/diagnostics/false-positive-suppression.c
@@ -6,7 +6,7 @@
typedef __typeof(sizeof(int)) size_t;
void *malloc(size_t);
-int radar12491259() {
+int radar12491259(void) {
int *p = malloc(12);
FREE_POINTER(p);
FREE_POINTER(p); // no-warning: we are suppressing errors coming from sys/queue macros.
@@ -15,7 +15,7 @@ int radar12491259() {
#define MYMACRO(p) FREE_POINTER(p)
-int radar12491259_inside_macro() {
+int radar12491259_inside_macro(void) {
int *p = malloc(12);
MYMACRO(p);
MYMACRO(p); // no-warning: we are suppressing errors coming from sys/queue macros.
diff --git a/clang/test/Analysis/diagnostics/find_last_store.c b/clang/test/Analysis/diagnostics/find_last_store.c
index 486e4ec64d16..e6162f5472cf 100644
--- a/clang/test/Analysis/diagnostics/find_last_store.c
+++ b/clang/test/Analysis/diagnostics/find_last_store.c
@@ -1,11 +1,11 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core -analyzer-output=text -verify %s
typedef struct { float b; } c;
-void *a();
-void *d() {
+void *a(void);
+void *d(void) {
return a();
}
-void no_find_last_store() {
+void no_find_last_store(void) {
c *e = d(); // expected-note{{'e' initialized here}}
(void)(e || e->b); // expected-note{{Assuming 'e' is null}}
diff --git a/clang/test/Analysis/diagnostics/macro-null-return-suppression.cpp b/clang/test/Analysis/diagnostics/macro-null-return-suppression.cpp
index a2928f15c1e3..06a2e895bc9a 100644
--- a/clang/test/Analysis/diagnostics/macro-null-return-suppression.cpp
+++ b/clang/test/Analysis/diagnostics/macro-null-return-suppression.cpp
@@ -2,7 +2,7 @@
#define NULL 0
-int test_noparammacro() {
+int test_noparammacro(void) {
int *x = NULL; // expected-note{{'x' initialized to a null pointer value}}
return *x; // expected-warning{{Dereference of null pointer (loaded from variable 'x')}}
// expected-note@-1{{Dereference of null pointer (loaded from variable 'x')}}
@@ -22,7 +22,7 @@ char test_declaration(int *param) {
return *param2;
}
-int coin();
+int coin(void);
int test_multi_decl(int *paramA, int *paramB) {
char *param1 = DYN_CAST(paramA), *param2 = DYN_CAST(paramB);
@@ -38,7 +38,7 @@ int testDivision(int a) {
}
// Warning should not be suppressed if it happens in the same macro.
-#define DEREF_IN_MACRO(X) int fn() {int *p = 0; return *p; }
+#define DEREF_IN_MACRO(X) int fn(void) {int *p = 0; return *p; }
DEREF_IN_MACRO(0) // expected-warning{{Dereference of null pointer}}
// expected-note@-1{{'p' initialized to a null}}
@@ -47,8 +47,8 @@ DEREF_IN_MACRO(0) // expected-warning{{Dereference of null pointer}}
// Warning should not be suppressed if the null returned by the macro
// is not related to the warning.
#define RETURN_NULL() (0)
-extern int* returnFreshPointer();
-int noSuppressMacroUnrelated() {
+extern int* returnFreshPointer(void);
+int noSuppressMacroUnrelated(void) {
int *x = RETURN_NULL();
x = returnFreshPointer(); // expected-note{{Value assigned to 'x'}}
if (x) {} // expected-note{{Taking false branch}}
@@ -59,7 +59,7 @@ int noSuppressMacroUnrelated() {
// Value haven't changed by the assignment, but the null pointer
// did not come from the macro.
-int noSuppressMacroUnrelatedOtherReason() {
+int noSuppressMacroUnrelatedOtherReason(void) {
int *x = RETURN_NULL();
x = returnFreshPointer();
x = 0; // expected-note{{Null pointer value stored to 'x'}}
diff --git a/clang/test/Analysis/diagnostics/no-prune-paths.c b/clang/test/Analysis/diagnostics/no-prune-paths.c
index 6e9e45766bf5..8f807922176e 100644
--- a/clang/test/Analysis/diagnostics/no-prune-paths.c
+++ b/clang/test/Analysis/diagnostics/no-prune-paths.c
@@ -4,12 +4,12 @@
// "prune-paths" is a debug option only; this is just a simple test to see that
// it's being honored.
-void helper() {
- extern void foo();
+void helper(void) {
+ extern void foo(void);
foo();
}
-void test() {
+void test(void) {
helper();
#if NPRUNE
// expected-note@-2 {{Calling 'helper'}}
diff --git a/clang/test/Analysis/diagnostics/no-store-func-path-notes.c b/clang/test/Analysis/diagnostics/no-store-func-path-notes.c
index fd0a90e85e29..2aa95fe2d617 100644
--- a/clang/test/Analysis/diagnostics/no-store-func-path-notes.c
+++ b/clang/test/Analysis/diagnostics/no-store-func-path-notes.c
@@ -14,7 +14,7 @@ int initializer1(int *p, int x) {
}
}
-int param_not_initialized_by_func() {
+int param_not_initialized_by_func(void) {
int p; // expected-note {{'p' declared without an initial value}}
int out = initializer1(&p, 0); // expected-note{{Calling 'initializer1'}}
// expected-note@-1{{Returning from 'initializer1'}}
@@ -22,7 +22,7 @@ int param_not_initialized_by_func() {
// expected-warning@-1{{Undefined or garbage value returned to caller}}
}
-int param_initialized_properly() {
+int param_initialized_properly(void) {
int p;
int out = initializer1(&p, 1);
return p; //no-warning
@@ -40,7 +40,7 @@ int initializer2(int **p, int x) {
}
}
-int param_not_written_into_by_func() {
+int param_not_written_into_by_func(void) {
int *p = 0; // expected-note{{'p' initialized to a null pointer value}}
int out = initializer2(&p, 0); // expected-note{{Calling 'initializer2'}}
// expected-note@-1{{Returning from 'initializer2'}}
@@ -54,7 +54,7 @@ void initializer3(int *p, int param) {
*p = 0;
} // expected-note{{Returning without writing to '*p'}}
-int param_written_into_by_void_func() {
+int param_written_into_by_void_func(void) {
int p; // expected-note{{'p' declared without an initial value}}
initializer3(&p, 0); // expected-note{{Calling 'initializer3'}}
// expected-note@-1{{Returning from 'initializer3'}}
@@ -74,7 +74,7 @@ void initializer5(int *p, int param) {
*p = 0;
} // expected-note{{Returning without writing to '*p'}}
-int multi_init_tries_func() {
+int multi_init_tries_func(void) {
int p; // expected-note{{'p' declared without an initial value}}
initializer4(&p, 0); // expected-note{{Calling 'initializer4'}}
// expected-note@-1{{Returning from 'initializer4'}}
@@ -88,7 +88,7 @@ int initializer6(const int *p) {
return 0;
}
-int no_msg_on_const() {
+int no_msg_on_const(void) {
int p; // expected-note{{'p' declared without an initial value}}
initializer6(&p);
return p; // expected-warning{{Undefined or garbage value returned to caller}}
@@ -108,7 +108,7 @@ int initializer7(S *s, int param) {
return 1; // expected-note{{Returning without writing to 's->x'}}
}
-int initialize_struct_field() {
+int initialize_struct_field(void) {
S local;
initializer7(&local, 0); // expected-note{{Calling 'initializer7'}}
// expected-note@-1{{Returning from 'initializer7'}}
@@ -120,7 +120,7 @@ void nullwriter(int **p) {
*p = 0; // expected-note{{Null pointer value stored to 'p'}}
} // no extra note
-int usage() {
+int usage(void) {
int x = 0;
int *p = &x;
nullwriter(&p); // expected-note{{Calling 'nullwriter'}}
@@ -138,7 +138,7 @@ void partial_initializer(A *a) {
a->x = 0;
} // expected-note{{Returning without writing to 'a->y'}}
-int use_partial_initializer() {
+int use_partial_initializer(void) {
A a;
partial_initializer(&a); // expected-note{{Calling 'partial_initializer'}}
// expected-note@-1{{Returning from 'partial_initializer'}}
@@ -159,7 +159,7 @@ void partial_nested_initializer(C *c) {
c->b.x = 0;
} // expected-note{{Returning without writing to 'c->b.y'}}
-int use_partial_nested_initializer() {
+int use_partial_nested_initializer(void) {
B localB;
C localC;
localC.b = localB;
@@ -174,7 +174,7 @@ void test_subregion_assignment(C* c) {
c->b = b;
}
-int use_subregion_assignment() {
+int use_subregion_assignment(void) {
C c;
test_subregion_assignment(&c); // expected-note{{Calling 'test_subregion_assignment'}}
// expected-note@-1{{Returning from 'test_subregion_assignment'}}
@@ -187,7 +187,7 @@ int confusing_signature(int *p) {
return 0; // expected-note{{Returning without writing to '*p'}}
}
-int use_confusing_signature() {
+int use_confusing_signature(void) {
int a; // expected-note {{'a' declared without an initial value}}
confusing_signature(&a); // expected-note{{Calling 'confusing_signature'}}
// expected-note@-1{{Returning from 'confusing_signature'}}
@@ -195,7 +195,7 @@ int use_confusing_signature() {
// expected-warning@-1{{Undefined or garbage value returned to caller}}
}
-int coin();
+int coin(void);
int multiindirection(int **p) {
if (coin()) // expected-note{{Assuming the condition is true}}
@@ -205,7 +205,7 @@ int multiindirection(int **p) {
return 0;
}
-int usemultiindirection() {
+int usemultiindirection(void) {
int a; // expected-note {{'a' declared without an initial value}}
int *b = &a;
multiindirection(&b); // expected-note{{Calling 'multiindirection'}}
@@ -223,7 +223,7 @@ int indirectingstruct(S** s) {
return 0;
}
-int useindirectingstruct() {
+int useindirectingstruct(void) {
S s;
S* p = &s;
indirectingstruct(&p); //expected-note{{Calling 'indirectingstruct'}}
@@ -242,7 +242,7 @@ void initializeMaybeInStruct(D* pD) {
*pD->x = 120;
} // expected-note{{Returning without writing to 'pD->x'}}
-int useInitializeMaybeInStruct() {
+int useInitializeMaybeInStruct(void) {
int z; // expected-note{{'z' declared without an initial value}}
D d;
d.x = &z;
diff --git a/clang/test/Analysis/diagnostics/no-store-func-path-notes.m b/clang/test/Analysis/diagnostics/no-store-func-path-notes.m
index 6ef162e4ecd5..4826b38b98a5 100644
--- a/clang/test/Analysis/diagnostics/no-store-func-path-notes.m
+++ b/clang/test/Analysis/diagnostics/no-store-func-path-notes.m
@@ -2,7 +2,7 @@
#include "../Inputs/system-header-simulator-for-nullability.h"
-extern int coin();
+extern int coin(void);
@interface I : NSObject
- (int)initVar:(int *)var param:(int)param;
@@ -41,7 +41,7 @@ int initializer1(int *p, int x) {
}
}
-int initFromBlock() {
+int initFromBlock(void) {
__block int z;
^{ // expected-note {{Calling anonymous block}}
int p; // expected-note{{'p' declared without an initial value}}
diff --git a/clang/test/Analysis/diagnostics/plist-multi-file.c b/clang/test/Analysis/diagnostics/plist-multi-file.c
index fc1d581562bd..8473d6bf0201 100644
--- a/clang/test/Analysis/diagnostics/plist-multi-file.c
+++ b/clang/test/Analysis/diagnostics/plist-multi-file.c
@@ -3,6 +3,6 @@
#include "plist-multi-file.h"
-void bar() {
+void bar(void) {
foo(0);
}
diff --git a/clang/test/Analysis/diagnostics/sarif-multi-diagnostic-test.c b/clang/test/Analysis/diagnostics/sarif-multi-diagnostic-test.c
index 542a8087c4f2..61d19817407e 100644
--- a/clang/test/Analysis/diagnostics/sarif-multi-diagnostic-test.c
+++ b/clang/test/Analysis/diagnostics/sarif-multi-diagnostic-test.c
@@ -30,7 +30,7 @@ int leak(int i) {
return 0;
}
-int unicode() {
+int unicode(void) {
int løçål = 0;
/* ☃ */ return 1 / løçål; // expected-warning {{Division by zero}}
}
diff --git a/clang/test/Analysis/diagnostics/shortest-path-suppression.c b/clang/test/Analysis/diagnostics/shortest-path-suppression.c
index d0fa4b51ef44..8f8a803854ad 100644
--- a/clang/test/Analysis/diagnostics/shortest-path-suppression.c
+++ b/clang/test/Analysis/diagnostics/shortest-path-suppression.c
@@ -1,8 +1,8 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core -analyzer-config suppress-null-return-paths=true -analyzer-output=text -verify %s
// expected-no-diagnostics
-int *returnNull() { return 0; }
-int coin();
+int *returnNull(void) { return 0; }
+int coin(void);
// Use a float parameter to ensure that the value is unknown. This will create
// a cycle in the generated ExplodedGraph.
diff --git a/clang/test/Analysis/diagnostics/text-diagnostics.c b/clang/test/Analysis/diagnostics/text-diagnostics.c
index 01946476e099..35167b85b240 100644
--- a/clang/test/Analysis/diagnostics/text-diagnostics.c
+++ b/clang/test/Analysis/diagnostics/text-diagnostics.c
@@ -1,6 +1,6 @@
// RUN: %clang_analyze_cc1 -analyzer-checker=core.NullDereference -analyzer-output=text -fno-caret-diagnostics %s 2>&1 | FileCheck %s
-void testA() {
+void testA(void) {
int *p = 0;
*p = 1;
diff --git a/clang/test/Analysis/diagnostics/undef-value-callee.h b/clang/test/Analysis/diagnostics/undef-value-callee.h
index ea48c46a62c1..9fbff2592040 100644
--- a/clang/test/Analysis/diagnostics/undef-value-callee.h
+++ b/clang/test/Analysis/diagnostics/undef-value-callee.h
@@ -1,4 +1,4 @@
-void callee() {
+void callee(void) {
;
}
diff --git a/clang/test/Analysis/diagnostics/undef-value-param.c b/clang/test/Analysis/diagnostics/undef-value-param.c
index 1c755157e1eb..e88e3c98ecfc 100644
--- a/clang/test/Analysis/diagnostics/undef-value-param.c
+++ b/clang/test/Analysis/diagnostics/undef-value-param.c
@@ -43,7 +43,7 @@ int testPassingParentRegionArray(int x) {
//expected-note@-1 {{The right operand of '*' is a garbage value}}
}
-double *getValidPtr();
+double *getValidPtr(void);
struct WithFields {
double *f1;
};
diff --git a/clang/test/Analysis/disable-all-checks.c b/clang/test/Analysis/disable-all-checks.c
index 4d1c625ef1f1..be56ffa4045e 100644
--- a/clang/test/Analysis/disable-all-checks.c
+++ b/clang/test/Analysis/disable-all-checks.c
@@ -14,7 +14,7 @@
// CHECK: no analyzer checkers or packages are associated with 'non.existant.Checker'
// CHECK: use -analyzer-disable-all-checks to disable all static analyzer checkers
-int buggy() {
+int buggy(void) {
int x = 0;
return 5/x; // no warning
}
diff --git a/clang/test/Analysis/dispatch-once.m b/clang/test/Analysis/dispatch-once.m
index b8cf582ba468..fa84f563815d 100644
--- a/clang/test/Analysis/dispatch-once.m
+++ b/clang/test/Analysis/dispatch-once.m
@@ -19,17 +19,17 @@ typedef void (^dispatch_block_t)(void);
typedef long dispatch_once_t;
void dispatch_once(dispatch_once_t *predicate, dispatch_block_t block);
-void test_stack() {
+void test_stack(void) {
dispatch_once_t once;
dispatch_once(&once, ^{}); // expected-warning{{Call to 'dispatch_once' uses the local variable 'once' for the predicate value. Using such transient memory for the predicate is potentially dangerous. Perhaps you intended to declare the variable as 'static'?}}
}
-void test_static_local() {
+void test_static_local(void) {
static dispatch_once_t once;
dispatch_once(&once, ^{}); // no-warning
}
-void test_heap_var() {
+void test_heap_var(void) {
dispatch_once_t *once = calloc(1, sizeof(dispatch_once_t));
// Use regexps to check that we're NOT suggesting to make this static.
dispatch_once(once, ^{}); // expected-warning-re{{{{^Call to 'dispatch_once' uses heap-allocated memory for the predicate value. Using such transient memory for the predicate is potentially dangerous$}}}}
@@ -44,12 +44,12 @@ typedef struct {
dispatch_once_t once;
} Struct;
-void test_local_struct() {
+void test_local_struct(void) {
Struct s;
dispatch_once(&s.once, ^{}); // expected-warning{{Call to 'dispatch_once' uses memory within the local variable 's' for the predicate value.}}
}
-void test_heap_struct() {
+void test_heap_struct(void) {
Struct *s = calloc(1, sizeof(Struct));
dispatch_once(&s->once, ^{}); // expected-warning{{Call to 'dispatch_once' uses heap-allocated memory for the predicate value.}}
}
@@ -76,15 +76,15 @@ void test_heap_struct() {
}
@end
-void test_ivar_from_alloc_init() {
+void test_ivar_from_alloc_init(void) {
Object *o = [[Object alloc] init];
dispatch_once(&o->once, ^{}); // expected-warning{{Call to 'dispatch_once' uses the instance variable 'once' for the predicate value.}}
}
-void test_ivar_struct_from_alloc_init() {
+void test_ivar_struct_from_alloc_init(void) {
Object *o = [[Object alloc] init];
dispatch_once(&o->s.once, ^{}); // expected-warning{{Call to 'dispatch_once' uses memory within the instance variable 's' for the predicate value.}}
}
-void test_ivar_array_from_alloc_init() {
+void test_ivar_array_from_alloc_init(void) {
Object *o = [[Object alloc] init];
dispatch_once(&o->once_array[1], ^{}); // expected-warning{{Call to 'dispatch_once' uses memory within the instance variable 'once_array' for the predicate value.}}
}
@@ -100,7 +100,7 @@ void test_ivar_array_from_external_obj(Object *o) {
dispatch_once(&o->once_array[1], ^{}); // expected-warning{{Call to 'dispatch_once' uses memory within the instance variable 'once_array' for the predicate value.}}
}
-void test_block_var_from_block() {
+void test_block_var_from_block(void) {
__block dispatch_once_t once;
^{
dispatch_once(&once, ^{}); // expected-warning{{Call to 'dispatch_once' uses the block variable 'once' for the predicate value.}}
@@ -109,7 +109,7 @@ void test_block_var_from_block() {
void use_block_var(dispatch_once_t *once);
-void test_block_var_from_outside_block() {
+void test_block_var_from_outside_block(void) {
__block dispatch_once_t once;
^{
use_block_var(&once);
@@ -117,7 +117,7 @@ void test_block_var_from_outside_block() {
dispatch_once(&once, ^{}); // expected-warning{{Call to 'dispatch_once' uses the block variable 'once' for the predicate value.}}
}
-void test_static_var_from_outside_block() {
+void test_static_var_from_outside_block(void) {
static dispatch_once_t once;
^{
dispatch_once(&once, ^{}); // no-warning
diff --git a/clang/test/Analysis/domtest.c b/clang/test/Analysis/domtest.c
index b642bd35319c..77b2685e98c9 100644
--- a/clang/test/Analysis/domtest.c
+++ b/clang/test/Analysis/domtest.c
@@ -5,7 +5,7 @@
// RUN: 2>&1 | FileCheck %s
// Test the DominatorsTree implementation with various control flows
-int test1()
+int test1(void)
{
int x = 6;
int y = x/2;
@@ -65,7 +65,7 @@ int test1()
// CHECK-NEXT: (8,7)
// CHECK-NEXT: (9,8)
-int test2()
+int test2(void)
{
int x,y,z;
@@ -117,7 +117,7 @@ int test2()
// CHECK-NEXT: (6,1)
// CHECK-NEXT: (7,6)
-int test3()
+int test3(void)
{
int x,y,z;
@@ -178,7 +178,7 @@ int test3()
// CHECK-NEXT: (7,1)
// CHECK-NEXT: (8,7)
-int test4()
+int test4(void)
{
int y = 3;
while(y > 0) {
@@ -257,7 +257,7 @@ int test4()
// CHECK-NEXT: (11,10)
// CHECK-NEXT: (12,11)
-int test5()
+int test5(void)
{
int x,y,z,a,b,c;
x = 1;
diff --git a/clang/test/Analysis/double-ranges-bug.c b/clang/test/Analysis/double-ranges-bug.c
index aa3dde293034..a73d2ff949ed 100644
--- a/clang/test/Analysis/double-ranges-bug.c
+++ b/clang/test/Analysis/double-ranges-bug.c
@@ -6,7 +6,7 @@ typedef unsigned long int A;
extern int fill(A **values, int *nvalues);
-void foo() {
+void foo(void) {
A *values;
int nvalues;
fill(&values, &nvalues);
diff --git a/clang/test/Analysis/dump_egraph.c b/clang/test/Analysis/dump_egraph.c
index d6f252af8e76..a2a916d0f008 100644
--- a/clang/test/Analysis/dump_egraph.c
+++ b/clang/test/Analysis/dump_egraph.c
@@ -9,9 +9,9 @@
// REQUIRES: asserts
-int getJ();
+int getJ(void);
-int foo() {
+int foo(void) {
int *x = 0, *y = 0;
char c = '\x13';
diff --git a/clang/test/Analysis/elementtype.c b/clang/test/Analysis/elementtype.c
index 7eba8e14b3a7..47f2ca1d9082 100644
--- a/clang/test/Analysis/elementtype.c
+++ b/clang/test/Analysis/elementtype.c
@@ -5,7 +5,7 @@ typedef struct added_obj_st {
} ADDED_OBJ;
// Test if we are using the canonical type for ElementRegion.
-void f() {
+void f(void) {
ADDED_OBJ *ao[4]={((void*)0),((void*)0),((void*)0),((void*)0)};
if (ao[0] != ((void*)0)) {
ao[0]->type=0;
diff --git a/clang/test/Analysis/enum-cast-out-of-range.c b/clang/test/Analysis/enum-cast-out-of-range.c
index 03e1100c38f4..3282cba653d7 100644
--- a/clang/test/Analysis/enum-cast-out-of-range.c
+++ b/clang/test/Analysis/enum-cast-out-of-range.c
@@ -10,7 +10,7 @@ enum En_t {
En_4 = 4
};
-void unscopedUnspecifiedCStyle() {
+void unscopedUnspecifiedCStyle(void) {
enum En_t Below = (enum En_t)(-5); // expected-warning {{not in the valid range}}
enum En_t NegVal1 = (enum En_t)(-4); // OK.
enum En_t NegVal2 = (enum En_t)(-3); // OK.
@@ -25,7 +25,7 @@ void unscopedUnspecifiedCStyle() {
}
enum En_t unused;
-void unusedExpr() {
+void unusedExpr(void) {
// Following line is not something that EnumCastOutOfRangeChecker should
// evaluate. Checker should either ignore this line or process it without
// producing any warnings. However, compilation will (and should) still
diff --git a/clang/test/Analysis/equality_tracking.c b/clang/test/Analysis/equality_tracking.c
index bf84e51ce702..681453b1fe29 100644
--- a/clang/test/Analysis/equality_tracking.c
+++ b/clang/test/Analysis/equality_tracking.c
@@ -9,9 +9,9 @@
#define CHAR_MIN (char)(UCHAR_MAX & ~(UCHAR_MAX >> 1))
void clang_analyzer_eval(int);
-void clang_analyzer_warnIfReached();
+void clang_analyzer_warnIfReached(void);
-int getInt();
+int getInt(void);
void zeroImpliesEquality(int a, int b) {
clang_analyzer_eval((a - b) == 0); // expected-warning{{UNKNOWN}}
diff --git a/clang/test/Analysis/exercise-ps.c b/clang/test/Analysis/exercise-ps.c
index 9bcda12b0b26..6d1b40f13cd3 100644
--- a/clang/test/Analysis/exercise-ps.c
+++ b/clang/test/Analysis/exercise-ps.c
@@ -15,7 +15,7 @@ static void f1(const char *x, char *y) {
// the RvalueType of an ElementRegion.
typedef struct F12_struct {} F12_typedef;
typedef void* void_typedef;
-void_typedef f2_helper();
+void_typedef f2_helper(void);
static void f2(void *buf) {
F12_typedef* x;
x = f2_helper();
diff --git a/clang/test/Analysis/explain-svals.m b/clang/test/Analysis/explain-svals.m
index 52f48c5e504f..e93258b3626a 100644
--- a/clang/test/Analysis/explain-svals.m
+++ b/clang/test/Analysis/explain-svals.m
@@ -21,7 +21,7 @@ void test_1(Object *p) {
clang_analyzer_explain(q->x); // expected-warning-re{{{{^initial value of instance variable 'x' of object at symbol of type 'Object \*' conjured at statement '\[\[Object alloc\] init\]'$}}}}
}
-void test_2() {
+void test_2(void) {
__block int x;
^{
clang_analyzer_explain(&x); // expected-warning-re{{{{^pointer to block variable 'x'$}}}}
diff --git a/clang/test/CodeGenCUDA/amdgpu-asan-printf.cu b/clang/test/CodeGenCUDA/amdgpu-asan-printf.cu
deleted file mode 100644
index 69246f9ce7af..000000000000
--- a/clang/test/CodeGenCUDA/amdgpu-asan-printf.cu
+++ /dev/null
@@ -1,17 +0,0 @@
-// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
-// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
-// RUN: -O3 -x hip | FileCheck -check-prefixes=MFCHECK %s
-
-// MFCHECK: !{{.*}} = !{i32 4, !"amdgpu_hostcall", i32 1}
-
-// Test to check hostcall module flag metadata is generated correctly
-// when a program has printf call and compiled with -fsanitize=address.
-#include "Inputs/cuda.h"
-__device__ void non_kernel() {
- printf("sanitized device function");
-}
-
-__global__ void kernel() {
- non_kernel();
-}
-
diff --git a/clang/test/CodeGenCUDA/amdgpu-asan.cu b/clang/test/CodeGenCUDA/amdgpu-asan.cu
index 31fb1646d973..24148b05c0ca 100644
--- a/clang/test/CodeGenCUDA/amdgpu-asan.cu
+++ b/clang/test/CodeGenCUDA/amdgpu-asan.cu
@@ -9,12 +9,12 @@
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -mlink-bitcode-file %t.asanrtl.bc -x hip \
-// RUN: | FileCheck -check-prefixes=ASAN,MFCHECK %s
+// RUN: | FileCheck -check-prefixes=ASAN %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -fsanitize=address \
// RUN: -O3 -mlink-bitcode-file %t.asanrtl.bc -x hip \
-// RUN: | FileCheck -check-prefixes=ASAN,MFCHECK %s
+// RUN: | FileCheck -check-prefixes=ASAN %s
// RUN: %clang_cc1 %s -emit-llvm -o - -triple=amdgcn-amd-amdhsa \
// RUN: -fcuda-is-device -target-cpu gfx906 -x hip \
@@ -27,7 +27,5 @@
// ASAN-DAG: @llvm.compiler.used = {{.*}}@__amdgpu_device_library_preserve_asan_functions_ptr
// ASAN-DAG: define weak void @__asan_report_load1(i64 %{{.*}})
-// MFCHECK: !{{.*}} = !{i32 4, !"amdgpu_hostcall", i32 1}
-
// CHECK-NOT: @__amdgpu_device_library_preserve_asan_functions
// CHECK-NOT: @__asan_report_load1
diff --git a/clang/test/Driver/cl-options.c b/clang/test/Driver/cl-options.c
index ae94d86eb94c..4a8a1b3c5ac4 100644
--- a/clang/test/Driver/cl-options.c
+++ b/clang/test/Driver/cl-options.c
@@ -76,7 +76,9 @@
// CHECK-NO-MIX-GEN-USE: '{{[a-z=-]*}}' not allowed with '{{[a-z=-]*}}'
// RUN: %clang_cl -### /FA -fprofile-instr-use -- %s 2>&1 | FileCheck -check-prefix=CHECK-PROFILE-USE %s
+// RUN: %clang_cl -### /FA -fprofile-use -- %s 2>&1 | FileCheck -check-prefix=CHECK-PROFILE-USE %s
// RUN: %clang_cl -### /FA -fprofile-instr-use=/tmp/somefile.prof -- %s 2>&1 | FileCheck -check-prefix=CHECK-PROFILE-USE-FILE %s
+// RUN: %clang_cl -### /FA -fprofile-use=/tmp/somefile.prof -- %s 2>&1 | FileCheck -check-prefix=CHECK-PROFILE-USE-FILE %s
// CHECK-PROFILE-USE: "-fprofile-instrument-use-path=default.profdata"
// CHECK-PROFILE-USE-FILE: "-fprofile-instrument-use-path=/tmp/somefile.prof"
diff --git a/clang/test/PCH/decl-in-prototype.c b/clang/test/PCH/decl-in-prototype.c
index a291bf2df839..865775a8fdbd 100644
--- a/clang/test/PCH/decl-in-prototype.c
+++ b/clang/test/PCH/decl-in-prototype.c
@@ -20,7 +20,7 @@ static inline __attribute__((always_inline)) f(enum { x, y } p) {
#else
-int main() {
+int main(void) {
return f(0);
}
diff --git a/clang/test/PCH/designated-init.c.h b/clang/test/PCH/designated-init.c.h
index 18216279c2e9..8a524ae1e971 100644
--- a/clang/test/PCH/designated-init.c.h
+++ b/clang/test/PCH/designated-init.c.h
@@ -54,8 +54,8 @@ struct P1 l1 = {
.q.b = { [1] = 'x' }
};
-extern struct Q1 *foo();
-static struct P1 test_foo() {
+extern struct Q1 *foo(void);
+static struct P1 test_foo(void) {
struct P1 l = { *foo(),
.q.b = { "boo" },
.q.b = { [1] = 'x' }
diff --git a/clang/test/PCH/different-diagnostic-level.c b/clang/test/PCH/different-diagnostic-level.c
index ac1a0daab164..22093baf5c54 100644
--- a/clang/test/PCH/different-diagnostic-level.c
+++ b/clang/test/PCH/different-diagnostic-level.c
@@ -8,7 +8,7 @@ extern int foo;
#else
-void f() {
+void f(void) {
int a = foo;
// Make sure we parsed this by getting an error.
int b = bar; // expected-error {{undeclared}}
diff --git a/clang/test/PCH/different-linker-version.c b/clang/test/PCH/different-linker-version.c
index 9e2f38f674a4..4ffcd99a3de8 100644
--- a/clang/test/PCH/different-linker-version.c
+++ b/clang/test/PCH/different-linker-version.c
@@ -8,7 +8,7 @@ extern int foo;
#else
-void f() {
+void f(void) {
int a = foo;
// Make sure we parsed this by getting an error.
int b = bar; // expected-error {{undeclared}}
diff --git a/clang/test/PCH/emit-dependencies.c b/clang/test/PCH/emit-dependencies.c
index c4bccf8bb1a6..8f44e61433ac 100644
--- a/clang/test/PCH/emit-dependencies.c
+++ b/clang/test/PCH/emit-dependencies.c
@@ -3,7 +3,7 @@
// RUN: %clang_cc1 -include-pch %t.pch -fsyntax-only -MT %s.o -dependency-file - %s | FileCheck %s
// CHECK: chain-decls1.h
-int main() {
+int main(void) {
f();
return 0;
}
diff --git a/clang/test/PCH/enum.c b/clang/test/PCH/enum.c
index 81dbd907ac78..9e7fbe92c25f 100644
--- a/clang/test/PCH/enum.c
+++ b/clang/test/PCH/enum.c
@@ -9,7 +9,7 @@
int i = Red;
-int return_enum_constant() {
+int return_enum_constant(void) {
int result = aRoundShape;
return result;
}
diff --git a/clang/test/PCH/exprs.c b/clang/test/PCH/exprs.c
index c0b279f88a38..1244b2faaf7f 100644
--- a/clang/test/PCH/exprs.c
+++ b/clang/test/PCH/exprs.c
@@ -30,7 +30,7 @@ floating_literal *double_ptr = &floating;
imaginary_literal *cdouble_ptr = &floating_complex;
// StringLiteral
-const char* printHello() {
+const char* printHello(void) {
return hello;
}
diff --git a/clang/test/PCH/externally-retained.m b/clang/test/PCH/externally-retained.m
index 6442c5181ee3..1c5d985a2865 100644
--- a/clang/test/PCH/externally-retained.m
+++ b/clang/test/PCH/externally-retained.m
@@ -23,7 +23,7 @@ id sharedObject = 0;
#else
//===----------------------------------------------------------------------===//
-void callDoSomething() {
+void callDoSomething(void) {
doSomething(sharedObject);
}
diff --git a/clang/test/PCH/field-designator.c b/clang/test/PCH/field-designator.c
index 6f318fd3c82a..f0486ec162d5 100644
--- a/clang/test/PCH/field-designator.c
+++ b/clang/test/PCH/field-designator.c
@@ -27,7 +27,7 @@ struct U {
#endif
//===----------------------------------------------------------------------===//
-void bar() {
+void bar(void) {
static const struct U plan = { .e = 1 };
}
diff --git a/clang/test/PCH/format-strings.c b/clang/test/PCH/format-strings.c
index 7198c4d3a263..679d1cf53d08 100644
--- a/clang/test/PCH/format-strings.c
+++ b/clang/test/PCH/format-strings.c
@@ -11,7 +11,7 @@ extern int printf(const char *restrict, ...);
#else
-void foo() {
+void foo(void) {
LOG;
}
diff --git a/clang/test/PCH/multiple-include-pch.c b/clang/test/PCH/multiple-include-pch.c
index 1ef17b9c6750..1e07b4f5ebce 100644
--- a/clang/test/PCH/multiple-include-pch.c
+++ b/clang/test/PCH/multiple-include-pch.c
@@ -11,7 +11,7 @@ extern int x;
#warning parsed this
// expected-warning@-1 {{parsed this}}
-int foo() {
+int foo(void) {
return x;
}
diff --git a/clang/test/PCH/nonvisible-external-defs.h b/clang/test/PCH/nonvisible-external-defs.h
index a36fc2ea468c..bb25a3d88426 100644
--- a/clang/test/PCH/nonvisible-external-defs.h
+++ b/clang/test/PCH/nonvisible-external-defs.h
@@ -6,6 +6,6 @@
-void f() {
+void f(void) {
extern int g(int, int);
}
diff --git a/clang/test/PCH/objc_container.h b/clang/test/PCH/objc_container.h
index c83f90238b6e..51f42d146ce2 100644
--- a/clang/test/PCH/objc_container.h
+++ b/clang/test/PCH/objc_container.h
@@ -10,7 +10,7 @@
- (void)setObject:(id)object forKeyedSubscript:(id)key;
@end
-void all() {
+void all(void) {
NSMutableArray *array;
id oldObject = array[10];
diff --git a/clang/test/PCH/objc_import.m b/clang/test/PCH/objc_import.m
index 724c8221848b..bdba92c0c805 100644
--- a/clang/test/PCH/objc_import.m
+++ b/clang/test/PCH/objc_import.m
@@ -9,7 +9,7 @@
#import "objc_import.h"
-void func() {
+void func(void) {
TestPCH *xx;
xx = [TestPCH alloc];
diff --git a/clang/test/PCH/objc_literals.m b/clang/test/PCH/objc_literals.m
index df3d4ba745d7..3efef1996265 100644
--- a/clang/test/PCH/objc_literals.m
+++ b/clang/test/PCH/objc_literals.m
@@ -41,7 +41,7 @@ typedef unsigned char BOOL;
@end
// CHECK-IR: define internal {{.*}}void @test_numeric_literals()
-static inline void test_numeric_literals() {
+static inline void test_numeric_literals(void) {
// CHECK-PRINT: id intlit = @17
// CHECK-IR: {{call.*17}}
id intlit = @17;
@@ -50,18 +50,18 @@ static inline void test_numeric_literals() {
id floatlit = @17.45;
}
-static inline void test_array_literals() {
+static inline void test_array_literals(void) {
// CHECK-PRINT: id arraylit = @[ @17, @17.449999999999999
id arraylit = @[@17, @17.45];
}
-static inline void test_dictionary_literals() {
+static inline void test_dictionary_literals(void) {
// CHECK-PRINT: id dictlit = @{ @17 : {{@17.449999999999999[^,]*}}, @"hello" : @"world" };
id dictlit = @{@17 : @17.45, @"hello" : @"world" };
}
#else
-void test_all() {
+void test_all(void) {
test_numeric_literals();
test_array_literals();
test_dictionary_literals();
diff --git a/clang/test/PCH/objc_methods.m b/clang/test/PCH/objc_methods.m
index ea40460fb8df..1543e022f199 100644
--- a/clang/test/PCH/objc_methods.m
+++ b/clang/test/PCH/objc_methods.m
@@ -7,7 +7,7 @@
// expected-no-diagnostics
-void func() {
+void func(void) {
TestPCH *xx;
TestForwardClassDecl *yy;
// FIXME:
diff --git a/clang/test/PCH/objc_property.m b/clang/test/PCH/objc_property.m
index 88a091928050..67f2e20a7836 100644
--- a/clang/test/PCH/objc_property.m
+++ b/clang/test/PCH/objc_property.m
@@ -7,7 +7,7 @@
// expected-no-diagnostics
-void func() {
+void func(void) {
TestProperties *xx = [TestProperties alloc];
xx.value = 5;
}
diff --git a/clang/test/PCH/pch-dir.c b/clang/test/PCH/pch-dir.c
index 944753c9c9c0..f8b8c05878f4 100644
--- a/clang/test/PCH/pch-dir.c
+++ b/clang/test/PCH/pch-dir.c
@@ -21,7 +21,7 @@
// CHECK-CBAR: int bar
int FOO;
-int get() {
+int get(void) {
#ifdef __cplusplus
// CHECK-CPP: .h.gch{{[/\\]}}cpp.gch
return i;
diff --git a/clang/test/PCH/pragma-diag.c b/clang/test/PCH/pragma-diag.c
index 601c940cee9b..8758442ecaa4 100644
--- a/clang/test/PCH/pragma-diag.c
+++ b/clang/test/PCH/pragma-diag.c
@@ -14,7 +14,7 @@
#else
-void f() {
+void f(void) {
int a = 0;
int b = a==a;
}
diff --git a/clang/test/PCH/pragma-optimize.c b/clang/test/PCH/pragma-optimize.c
index 9570117448e2..ae5ff10a8d31 100644
--- a/clang/test/PCH/pragma-optimize.c
+++ b/clang/test/PCH/pragma-optimize.c
@@ -21,7 +21,7 @@
int a;
-void f() {
+void f(void) {
a = 12345;
}
diff --git a/clang/test/PCH/rdar8852495.c b/clang/test/PCH/rdar8852495.c
index 7639f1f0db65..9ad440242980 100644
--- a/clang/test/PCH/rdar8852495.c
+++ b/clang/test/PCH/rdar8852495.c
@@ -17,7 +17,7 @@
#else
-int f() {
+int f(void) {
int a;
int b = a==a;
unsigned x;
diff --git a/clang/test/PCH/struct.c b/clang/test/PCH/struct.c
index 3e9d18833234..3a78e077c116 100644
--- a/clang/test/PCH/struct.c
+++ b/clang/test/PCH/struct.c
@@ -11,7 +11,7 @@ float getX(struct Point *p1) {
return p1->x;
}
-void *get_fun_ptr() {
+void *get_fun_ptr(void) {
return fun->is_ptr? fun->ptr : 0;
}
@@ -19,7 +19,7 @@ struct Fun2 {
int very_fun;
};
-int get_very_fun() {
+int get_very_fun(void) {
return fun2->very_fun;
}
diff --git a/clang/test/PCH/subscripting-literals.m b/clang/test/PCH/subscripting-literals.m
index 52491dbc2008..d9f848d183b8 100644
--- a/clang/test/PCH/subscripting-literals.m
+++ b/clang/test/PCH/subscripting-literals.m
@@ -53,7 +53,7 @@ void testDict(NSString *key, id newObject, id oldObject) {
NSDictionary *dict = @{ key: newObject, key: oldObject };
}
-void testBoxableValue() {
+void testBoxableValue(void) {
some_struct ss;
id value = @(ss);
}
diff --git a/clang/test/PCH/typo.m b/clang/test/PCH/typo.m
index 876b9438d1b7..857ae543f073 100644
--- a/clang/test/PCH/typo.m
+++ b/clang/test/PCH/typo.m
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -x objective-c-header -emit-pch -o %t %S/Inputs/typo.h
// RUN: %clang_cc1 -include-pch %t -verify %s
-void f() {
+void f(void) {
[NSstring alloc]; // expected-error{{unknown receiver 'NSstring'; did you mean 'NSString'?}}
// expected-note@Inputs/typo.h:3{{declared here}}
}
diff --git a/clang/test/PCH/undefined-internal.c b/clang/test/PCH/undefined-internal.c
index ef514606dcbb..242b1074e282 100644
--- a/clang/test/PCH/undefined-internal.c
+++ b/clang/test/PCH/undefined-internal.c
@@ -2,14 +2,14 @@
// RUN: %clang_cc1 -include-pch %t %s -verify
#ifndef HEADER_H
#define HEADER_H
-static void f();
-static void g();
-void h() {
+static void f(void);
+static void g(void);
+void h(void) {
f();
g();
}
#else
-static void g() {}
+static void g(void) {}
// expected-warning@5{{function 'f' has internal linkage but is not defined}}
// expected-note@8{{used here}}
#endif
diff --git a/clang/test/Preprocessor/extension-warning.c b/clang/test/Preprocessor/extension-warning.c
index 4ba57f78f77d..5fe905c8435d 100644
--- a/clang/test/Preprocessor/extension-warning.c
+++ b/clang/test/Preprocessor/extension-warning.c
@@ -15,4 +15,4 @@ TY(1) x; // FIXME: And we should warn here
// Current list of keywords this can trigger on:
// inline, restrict, asm, typeof, _asm
-void whatever() {}
+void whatever(void) {}
diff --git a/clang/test/Preprocessor/macro_raw_string.cpp b/clang/test/Preprocessor/macro_raw_string.cpp
index d56894888d1f..95f83807d159 100644
--- a/clang/test/Preprocessor/macro_raw_string.cpp
+++ b/clang/test/Preprocessor/macro_raw_string.cpp
@@ -5,7 +5,7 @@
extern void foo(const char *str);
-void bar() {
+void bar(void) {
FOO(R"(foo
bar)");
}
diff --git a/clang/test/Preprocessor/pragma_assume_nonnull.c b/clang/test/Preprocessor/pragma_assume_nonnull.c
index 4aa124113e2d..1aec2120fd16 100644
--- a/clang/test/Preprocessor/pragma_assume_nonnull.c
+++ b/clang/test/Preprocessor/pragma_assume_nonnull.c
@@ -11,6 +11,6 @@ int bar(int * ip) { return *ip; }
int foo(int * _Nonnull ip) { return *ip; }
-int main() {
+int main(void) {
return bar(0) + foo(0); // expected-warning 2 {{null passed to a callee that requires a non-null argument}}
}
diff --git a/clang/test/Preprocessor/pragma_microsoft.c b/clang/test/Preprocessor/pragma_microsoft.c
index 020292a4b256..ea246c2e6891 100644
--- a/clang/test/Preprocessor/pragma_microsoft.c
+++ b/clang/test/Preprocessor/pragma_microsoft.c
@@ -53,7 +53,7 @@ __pragma(comment(linker," bar=" BAR))
#define PRAGMA_IN_ARGS(p) p
-void f()
+void f(void)
{
__pragma() // expected-warning{{unknown pragma ignored}}
// CHECK: #pragma
@@ -112,7 +112,7 @@ void test( void ) {
// Test to make sure there are no use-after-free problems
#define B "pp-record.h"
#pragma include_alias("quux.h", B)
-void g() {}
+void g(void) {}
#include "quux.h"
// Make sure that empty includes don't work
diff --git a/clang/test/Preprocessor/user_defined_system_framework.c b/clang/test/Preprocessor/user_defined_system_framework.c
index 2ab2a297ecbc..c321058ce87e 100644
--- a/clang/test/Preprocessor/user_defined_system_framework.c
+++ b/clang/test/Preprocessor/user_defined_system_framework.c
@@ -4,6 +4,6 @@
// Check that TestFramework is treated as a system header.
#include <TestFramework/TestFramework.h>
-int f1() {
+int f1(void) {
return test_framework_func(1) + another_test_framework_func(2);
}
diff --git a/clang/test/Profile/c-captured.c b/clang/test/Profile/c-captured.c
index f34f0cb8d465..c19ecd181c34 100644
--- a/clang/test/Profile/c-captured.c
+++ b/clang/test/Profile/c-captured.c
@@ -9,7 +9,7 @@
// PGOALL-LABEL: define{{.*}} void @debug_captured()
// PGOGEN: store {{.*}} @[[DCC]], i32 0, i32 0
-void debug_captured() {
+void debug_captured(void) {
int x = 10;
// Check both debug_captured counters, so we can do this all in one pass
diff --git a/clang/test/Profile/c-collision.c b/clang/test/Profile/c-collision.c
index fabecd752b4e..6c779c6facaa 100644
--- a/clang/test/Profile/c-collision.c
+++ b/clang/test/Profile/c-collision.c
@@ -6,7 +6,7 @@
// CHECK-EXTRA: @__profd_foo = private global { {{.*}} } { i64 6699318081062747564, i64 -4383447408116050035,
extern int bar;
-void foo() {
+void foo(void) {
if (bar) {
}
if (bar) {
diff --git a/clang/test/Profile/c-general.c b/clang/test/Profile/c-general.c
index 91e5984272d0..67095b7bc7d0 100644
--- a/clang/test/Profile/c-general.c
+++ b/clang/test/Profile/c-general.c
@@ -25,7 +25,7 @@
// PGOGEN-LABEL: @simple_loops()
// PGOUSE-LABEL: @simple_loops()
// PGOGEN: store {{.*}} @[[SLC]], i32 0, i32 0
-void simple_loops() {
+void simple_loops(void) {
int i;
// PGOGEN: store {{.*}} @[[SLC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[SL1:[0-9]+]]
@@ -46,7 +46,7 @@ void simple_loops() {
// PGOGEN-LABEL: @conditionals()
// PGOUSE-LABEL: @conditionals()
// PGOGEN: store {{.*}} @[[IFC]], i32 0, i32 0
-void conditionals() {
+void conditionals(void) {
// PGOGEN: store {{.*}} @[[IFC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[IF1:[0-9]+]]
for (int i = 0; i < 100; ++i) {
@@ -87,7 +87,7 @@ void conditionals() {
// PGOGEN-LABEL: @early_exits()
// PGOUSE-LABEL: @early_exits()
// PGOGEN: store {{.*}} @[[EEC]], i32 0, i32 0
-void early_exits() {
+void early_exits(void) {
int i = 0;
// PGOGEN: store {{.*}} @[[EEC]], i32 0, i32 1
@@ -134,7 +134,7 @@ void early_exits() {
// PGOGEN-LABEL: @jumps()
// PGOUSE-LABEL: @jumps()
// PGOGEN: store {{.*}} @[[JMC]], i32 0, i32 0
-void jumps() {
+void jumps(void) {
int i;
// PGOGEN: store {{.*}} @[[JMC]], i32 0, i32 1
@@ -216,7 +216,7 @@ third:
// PGOGEN-LABEL: @switches()
// PGOUSE-LABEL: @switches()
// PGOGEN: store {{.*}} @[[SWC]], i32 0, i32 0
-void switches() {
+void switches(void) {
static int weights[] = {1, 2, 2, 3, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 5};
// No cases -> no weights
@@ -289,7 +289,7 @@ void switches() {
// PGOGEN-LABEL: @big_switch()
// PGOUSE-LABEL: @big_switch()
// PGOGEN: store {{.*}} @[[BSC]], i32 0, i32 0
-void big_switch() {
+void big_switch(void) {
// PGOGEN: store {{.*}} @[[BSC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[BS1:[0-9]+]]
for (int i = 0; i < 32; ++i) {
@@ -356,7 +356,7 @@ void big_switch() {
// PGOGEN-LABEL: @boolean_operators()
// PGOUSE-LABEL: @boolean_operators()
// PGOGEN: store {{.*}} @[[BOC]], i32 0, i32 0
-void boolean_operators() {
+void boolean_operators(void) {
int v;
// PGOGEN: store {{.*}} @[[BOC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[BO1:[0-9]+]]
@@ -395,7 +395,7 @@ void boolean_operators() {
// PGOGEN-LABEL: @boolop_loops()
// PGOUSE-LABEL: @boolop_loops()
// PGOGEN: store {{.*}} @[[BLC]], i32 0, i32 0
-void boolop_loops() {
+void boolop_loops(void) {
int i = 100;
// PGOGEN: store {{.*}} @[[BLC]], i32 0, i32 2
@@ -435,7 +435,7 @@ void boolop_loops() {
// PGOGEN-LABEL: @conditional_operator()
// PGOUSE-LABEL: @conditional_operator()
// PGOGEN: store {{.*}} @[[COC]], i32 0, i32 0
-void conditional_operator() {
+void conditional_operator(void) {
int i = 100;
// PGOGEN: store {{.*}} @[[COC]], i32 0, i32 1
@@ -453,7 +453,7 @@ void conditional_operator() {
// PGOGEN-LABEL: @do_fallthrough()
// PGOUSE-LABEL: @do_fallthrough()
// PGOGEN: store {{.*}} @[[DFC]], i32 0, i32 0
-void do_fallthrough() {
+void do_fallthrough(void) {
// PGOGEN: store {{.*}} @[[DFC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[DF1:[0-9]+]]
for (int i = 0; i < 10; ++i) {
@@ -475,7 +475,7 @@ void do_fallthrough() {
// PGOGEN-LABEL: @static_func()
// PGOUSE-LABEL: @static_func()
// PGOGEN: store {{.*}} @[[STC]], i32 0, i32 0
-static void static_func() {
+static void static_func(void) {
// PGOGEN: store {{.*}} @[[STC]], i32 0, i32 1
// PGOUSE: br {{.*}} !prof ![[ST1:[0-9]+]]
for (int i = 0; i < 10; ++i) {
diff --git a/clang/test/Profile/c-outdated-data.c b/clang/test/Profile/c-outdated-data.c
index 2b9773478930..454e4d799e0b 100644
--- a/clang/test/Profile/c-outdated-data.c
+++ b/clang/test/Profile/c-outdated-data.c
@@ -13,13 +13,13 @@
// WITH_MISSING: warning: profile data may be out of date: of 3 functions, 2 have mismatched data that will be ignored
// WITH_MISSING: warning: profile data may be incomplete: of 3 functions, 1 has no data
-void no_usable_data() {
+void no_usable_data(void) {
int i = 0;
if (i) {}
}
-void no_data() {
+void no_data(void) {
}
int main(int argc, const char *argv[]) {
diff --git a/clang/test/Profile/c-unreachable-after-switch.c b/clang/test/Profile/c-unreachable-after-switch.c
index cfc111b2752e..1764c27f0cdd 100644
--- a/clang/test/Profile/c-unreachable-after-switch.c
+++ b/clang/test/Profile/c-unreachable-after-switch.c
@@ -4,7 +4,7 @@
// CHECK-LABEL: @foo()
// CHECK: store {{.*}} @[[C]], i64 0, i64 0
-void foo() {
+void foo(void) {
// CHECK: store {{.*}} @[[C]], i64 0, i64 2
switch (0) {
default:
diff --git a/clang/test/Profile/coverage-prefix-map.c b/clang/test/Profile/coverage-prefix-map.c
index b16fd2a2c8f3..6f2c0fba0b09 100644
--- a/clang/test/Profile/coverage-prefix-map.c
+++ b/clang/test/Profile/coverage-prefix-map.c
@@ -2,7 +2,7 @@
// clean directory, put the source there, and cd into it.
// RUN: rm -rf %t
// RUN: mkdir -p %t/root/nested
-// RUN: echo "void f1() {}" > %t/root/nested/coverage-prefix-map.c
+// RUN: echo "void f1(void) {}" > %t/root/nested/coverage-prefix-map.c
// RUN: cd %t/root
// RUN: %clang_cc1 -fprofile-instrument=clang -fcoverage-mapping -emit-llvm -mllvm -enable-name-compression=false -main-file-name coverage-prefix-map.c %t/root/nested/coverage-prefix-map.c -o - | FileCheck --check-prefix=ABSOLUTE %s
diff --git a/clang/test/Profile/gcc-flag-compatibility-aix.c b/clang/test/Profile/gcc-flag-compatibility-aix.c
index db12e05c247b..ba889312ce4e 100644
--- a/clang/test/Profile/gcc-flag-compatibility-aix.c
+++ b/clang/test/Profile/gcc-flag-compatibility-aix.c
@@ -54,7 +54,7 @@
int X = 0;
-int main() {
+int main(void) {
int i;
for (i = 0; i < 100; i++)
X += i;
diff --git a/clang/test/Profile/gcc-flag-compatibility.c b/clang/test/Profile/gcc-flag-compatibility.c
index 9f83117c1ccd..674e92647dcb 100644
--- a/clang/test/Profile/gcc-flag-compatibility.c
+++ b/clang/test/Profile/gcc-flag-compatibility.c
@@ -52,7 +52,7 @@
int X = 0;
-int main() {
+int main(void) {
int i;
for (i = 0; i < 100; i++)
X += i;
diff --git a/clang/test/Refactor/Extract/ExtractionSemicolonPolicy.m b/clang/test/Refactor/Extract/ExtractionSemicolonPolicy.m
index 10e6a164f248..26f601a2d150 100644
--- a/clang/test/Refactor/Extract/ExtractionSemicolonPolicy.m
+++ b/clang/test/Refactor/Extract/ExtractionSemicolonPolicy.m
@@ -16,7 +16,7 @@ void extractStatementNoSemiObjCFor(NSArray *array) {
// CHECK-NEXT: }{{$}}
// CHECK-NEXT: }{{[[:space:]].*}}
-void extractStatementNoSemiSync() {
+void extractStatementNoSemiSync(void) {
id lock;
/*range bstmt=->+2:4*/@synchronized(lock) {
int x = 0;
@@ -29,7 +29,7 @@ void extractStatementNoSemiSync() {
// CHECK-NEXT: }{{$}}
// CHECK-NEXT: }{{[[:space:]].*}}
-void extractStatementNoSemiAutorel() {
+void extractStatementNoSemiAutorel(void) {
/*range cstmt=->+2:4*/@autoreleasepool {
int x = 0;
}
@@ -41,7 +41,7 @@ void extractStatementNoSemiAutorel() {
// CHECK-NEXT: }{{$}}
// CHECK-NEXT: }{{[[:space:]].*}}
-void extractStatementNoSemiTryFinalllllly() {
+void extractStatementNoSemiTryFinalllllly(void) {
/*range dstmt=->+3:4*/@try {
int x = 0;
} @finally {
diff --git a/clang/test/Rewriter/blockstruct.m b/clang/test/Rewriter/blockstruct.m
index 2c443616cf86..477afb45824b 100644
--- a/clang/test/Rewriter/blockstruct.m
+++ b/clang/test/Rewriter/blockstruct.m
@@ -7,9 +7,9 @@ void a(b_t work) { }
struct _s {
int a;
};
-struct _s *r();
+struct _s *r(void);
-void f() {
+void f(void) {
__block struct _s *s = 0;
a(^{
s = (struct _s *)r();
diff --git a/clang/test/Rewriter/crash.m b/clang/test/Rewriter/crash.m
index 7908d9fea5e2..55d7a03fffc9 100644
--- a/clang/test/Rewriter/crash.m
+++ b/clang/test/Rewriter/crash.m
@@ -7,7 +7,7 @@
@interface NSConstantString {}
@end
-int main() {
+int main(void) {
id foo = [NSArray arrayWithObjects:@"1", @"2", @"3", @"4", @"5", @"6", @"7", @"8", @"9", @"10", @"11", @"12", 0];
return 0;
}
@@ -19,7 +19,7 @@ int main() {
@interface Foo
@end
-void func() {
+void func(void) {
id <A> obj = (id <A>)[Foo bar];
}
diff --git a/clang/test/Rewriter/finally.m b/clang/test/Rewriter/finally.m
index e60ba9ec8526..33b919e4b3ef 100644
--- a/clang/test/Rewriter/finally.m
+++ b/clang/test/Rewriter/finally.m
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -rewrite-objc -fobjc-runtime=macosx-fragile-10.5 -fobjc-exceptions -verify %s -o -
-int main() {
+int main(void) {
@try {
printf("executing try"); // expected-warning{{implicitly declaring library function 'printf' with type 'int (const char *, ...)'}} \
// expected-note{{include the header <stdio.h> or explicitly provide a declaration for 'printf'}}
@@ -25,14 +25,14 @@ int main() {
return 0;
}
-void test_sync_with_implicit_finally() {
+void test_sync_with_implicit_finally(void) {
id foo;
@synchronized (foo) {
return; // The rewriter knows how to generate code for implicit finally
}
}
-void test2_try_with_implicit_finally() {
+void test2_try_with_implicit_finally(void) {
@try {
return; // The rewriter knows how to generate code for implicit finally
} @catch (id e) {
diff --git a/clang/test/Rewriter/objc-synchronized-1.m b/clang/test/Rewriter/objc-synchronized-1.m
index 0e985ab67f2b..9172a1316cb0 100644
--- a/clang/test/Rewriter/objc-synchronized-1.m
+++ b/clang/test/Rewriter/objc-synchronized-1.m
@@ -1,9 +1,9 @@
// RUN: %clang_cc1 -rewrite-objc -fobjc-runtime=macosx-fragile-10.5 %s -o -
-id SYNCH_EXPR();
-void SYNCH_BODY();
-void SYNCH_BEFORE();
-void SYNC_AFTER();
+id SYNCH_EXPR(void);
+void SYNCH_BODY(void);
+void SYNCH_BEFORE(void);
+void SYNC_AFTER(void);
void foo(id sem)
{
diff --git a/clang/test/Rewriter/rewrite-captured-nested-bvar.c b/clang/test/Rewriter/rewrite-captured-nested-bvar.c
index 1b624e0ca9c7..60029e7c9fc0 100644
--- a/clang/test/Rewriter/rewrite-captured-nested-bvar.c
+++ b/clang/test/Rewriter/rewrite-captured-nested-bvar.c
@@ -6,7 +6,7 @@ void q(void (^p)(void)) {
p();
}
-void f() {
+void f(void) {
__block char BYREF_VAR_CHECK = 'a';
__block char d = 'd';
q(^{
@@ -25,7 +25,7 @@ void f() {
});
}
-int main() {
+int main(void) {
f();
return 0;
}
diff --git a/clang/test/Rewriter/rewrite-foreach-1.m b/clang/test/Rewriter/rewrite-foreach-1.m
index 5263fffbe7dd..dcfa6e8a1afe 100644
--- a/clang/test/Rewriter/rewrite-foreach-1.m
+++ b/clang/test/Rewriter/rewrite-foreach-1.m
@@ -16,7 +16,7 @@
- (void)compilerTestAgainst;
@end
-int LOOP();
+int LOOP(void);
@implementation MyList (BasicTest)
- (void)compilerTestAgainst {
id el;
diff --git a/clang/test/Rewriter/rewrite-foreach-2.m b/clang/test/Rewriter/rewrite-foreach-2.m
index 120d7d7e7985..a0f7db301dd0 100644
--- a/clang/test/Rewriter/rewrite-foreach-2.m
+++ b/clang/test/Rewriter/rewrite-foreach-2.m
@@ -16,9 +16,9 @@
- (void)compilerTestAgainst;
@end
-int LOOP();
-int INNERLOOP();
-void END_LOOP();
+int LOOP(void);
+int INNERLOOP(void);
+void END_LOOP(void);
@implementation MyList (BasicTest)
- (void)compilerTestAgainst {
id el;
diff --git a/clang/test/Rewriter/rewrite-foreach-3.m b/clang/test/Rewriter/rewrite-foreach-3.m
index 6e67415002a3..4edbe80f2212 100644
--- a/clang/test/Rewriter/rewrite-foreach-3.m
+++ b/clang/test/Rewriter/rewrite-foreach-3.m
@@ -16,7 +16,7 @@
- (void)compilerTestAgainst;
@end
-int LOOP();
+int LOOP(void);
@implementation MyList (BasicTest)
- (void)compilerTestAgainst {
MyList * el;
diff --git a/clang/test/Rewriter/rewrite-foreach-4.m b/clang/test/Rewriter/rewrite-foreach-4.m
index e852c75cd2ef..50f95e4eb56d 100644
--- a/clang/test/Rewriter/rewrite-foreach-4.m
+++ b/clang/test/Rewriter/rewrite-foreach-4.m
@@ -16,7 +16,7 @@
- (void)compilerTestAgainst;
@end
-int LOOP();
+int LOOP(void);
@implementation MyList (BasicTest)
- (void)compilerTestAgainst {
MyList * el;
diff --git a/clang/test/Rewriter/rewrite-foreach-7.m b/clang/test/Rewriter/rewrite-foreach-7.m
index 8f3a7c8819fa..079825b01ea3 100644
--- a/clang/test/Rewriter/rewrite-foreach-7.m
+++ b/clang/test/Rewriter/rewrite-foreach-7.m
@@ -1,7 +1,7 @@
// RUN: %clang_cc1 -rewrite-objc -fobjc-runtime=macosx-fragile-10.5 %s -o -
@class NSArray;
-int main() {
+int main(void) {
NSArray *foo;
for (Class c in foo) { }
}
diff --git a/clang/test/Rewriter/rewrite-modern-synchronized.m b/clang/test/Rewriter/rewrite-modern-synchronized.m
index 17c8e9a4ad4f..59bc87d503f6 100644
--- a/clang/test/Rewriter/rewrite-modern-synchronized.m
+++ b/clang/test/Rewriter/rewrite-modern-synchronized.m
@@ -7,10 +7,10 @@ typedef struct objc_object {
void *sel_registerName(const char *);
-id SYNCH_EXPR();
-void SYNCH_BODY();
-void SYNCH_BEFORE();
-void SYNC_AFTER();
+id SYNCH_EXPR(void);
+void SYNCH_BODY(void);
+void SYNCH_BEFORE(void);
+void SYNC_AFTER(void);
void foo(id sem)
{
@@ -26,7 +26,7 @@ void foo(id sem)
}
}
-void test_sync_with_implicit_finally() {
+void test_sync_with_implicit_finally(void) {
id foo;
@synchronized (foo) {
return; // The rewriter knows how to generate code for implicit finally
diff --git a/clang/test/Rewriter/rewrite-modern-throw.m b/clang/test/Rewriter/rewrite-modern-throw.m
index 19d6b1f6d3c6..e2a2acd647b6 100644
--- a/clang/test/Rewriter/rewrite-modern-throw.m
+++ b/clang/test/Rewriter/rewrite-modern-throw.m
@@ -9,17 +9,17 @@ typedef struct objc_object {
void *sel_registerName(const char *);
@interface Foo @end
-void TRY();
-void SPLATCH();
-void MYTRY();
-void MYCATCH();
+void TRY(void);
+void SPLATCH(void);
+void MYTRY(void);
+void MYCATCH(void);
-void foo() {
+void foo(void) {
@try { TRY(); }
@catch (...) { SPLATCH(); @throw; }
}
-int main()
+int main(void)
{
@try {
diff --git a/clang/test/Rewriter/rewrite-modern-try-catch-finally.m b/clang/test/Rewriter/rewrite-modern-try-catch-finally.m
index 9beab7d75100..cdf79dbd306a 100644
--- a/clang/test/Rewriter/rewrite-modern-try-catch-finally.m
+++ b/clang/test/Rewriter/rewrite-modern-try-catch-finally.m
@@ -8,7 +8,7 @@ typedef struct objc_object {
extern int printf(const char *, ...);
-int main() {
+int main(void) {
@try {
}
@finally {
@@ -30,7 +30,7 @@ int main() {
return 0;
}
-void test2_try_with_implicit_finally() {
+void test2_try_with_implicit_finally(void) {
@try {
return;
} @catch (id e) {
@@ -38,9 +38,9 @@ void test2_try_with_implicit_finally() {
}
}
-void FINALLY();
-void TRY();
-void CATCH();
+void FINALLY(void);
+void TRY(void);
+void CATCH(void);
@interface NSException
@end
diff --git a/clang/test/Rewriter/rewrite-modern-try-finally.m b/clang/test/Rewriter/rewrite-modern-try-finally.m
index 41737e95f0cd..b964c6f52927 100644
--- a/clang/test/Rewriter/rewrite-modern-try-finally.m
+++ b/clang/test/Rewriter/rewrite-modern-try-finally.m
@@ -6,11 +6,11 @@ typedef struct objc_object {
Class isa;
} *id;
-void FINALLY();
-void TRY();
-void INNER_FINALLY();
-void INNER_TRY();
-void CHECK();
+void FINALLY(void);
+void TRY(void);
+void INNER_FINALLY(void);
+void INNER_TRY(void);
+void CHECK(void);
@interface Foo
@end
diff --git a/clang/test/Rewriter/rewrite-try-catch.m b/clang/test/Rewriter/rewrite-try-catch.m
index 8720d0c9b91b..8ac87f59f272 100644
--- a/clang/test/Rewriter/rewrite-try-catch.m
+++ b/clang/test/Rewriter/rewrite-try-catch.m
@@ -3,12 +3,12 @@
@interface Foo @end
@interface GARF @end
-void foo() {
+void foo(void) {
@try { TRY(); }
@catch (...) { SPLATCH(); @throw; }
}
-int main()
+int main(void)
{
@try {
diff --git a/clang/test/Rewriter/rewrite-weak-attr.m b/clang/test/Rewriter/rewrite-weak-attr.m
index f8eb3b727e0f..196f1d3c6e95 100644
--- a/clang/test/Rewriter/rewrite-weak-attr.m
+++ b/clang/test/Rewriter/rewrite-weak-attr.m
@@ -1,10 +1,10 @@
// RUN: %clang_cc1 -triple i686-pc-win32 -fms-extensions -fblocks -Dnil=0 -rewrite-objc -fobjc-runtime=macosx-fragile-10.5 -o - %s
-int main() {
+int main(void) {
__weak __block id foo = nil;
__block id foo2 = nil;
id foo3 = nil;
- void (^myblock)() = ^{
+ void (^myblock)(void) = ^{
foo = nil;
foo2 = nil;
[foo3 bar];
diff --git a/clang/test/Rewriter/undef-field-reference-1.m b/clang/test/Rewriter/undef-field-reference-1.m
index 07bd21b756c9..3bffd3897ed2 100644
--- a/clang/test/Rewriter/undef-field-reference-1.m
+++ b/clang/test/Rewriter/undef-field-reference-1.m
@@ -8,7 +8,7 @@
@end
MyDerived *pd;
-int main() {
+int main(void) {
return pd->IVAR;
}
diff --git a/clang/test/Rewriter/weak_byref_objects.m b/clang/test/Rewriter/weak_byref_objects.m
index 09d30ed268b0..52111c10edc0 100644
--- a/clang/test/Rewriter/weak_byref_objects.m
+++ b/clang/test/Rewriter/weak_byref_objects.m
@@ -1,12 +1,12 @@
// RUN: %clang_cc1 -fblocks -triple i386-apple-darwin9 -fobjc-gc -rewrite-objc -fobjc-runtime=macosx-fragile-10.5 %s -o -
#define nil 0
-int main() {
+int main(void) {
__weak __block id foo = nil;
__block id foo2 = nil;
id foo3 = nil;
- void (^myblock)() = ^{
+ void (^myblock)(void) = ^{
foo = nil;
foo2 = nil;
[foo3 bar];
diff --git a/clang/test/VFS/framework-import.m b/clang/test/VFS/framework-import.m
index cd923c1dbe0f..b745b56f55bf 100644
--- a/clang/test/VFS/framework-import.m
+++ b/clang/test/VFS/framework-import.m
@@ -3,6 +3,6 @@
#import <SomeFramework/public_header.h>
-void foo() {
+void foo(void) {
from_framework();
}
diff --git a/clang/test/VFS/implicit-include.c b/clang/test/VFS/implicit-include.c
index 06bff4b962db..f7cd1822cfde 100644
--- a/clang/test/VFS/implicit-include.c
+++ b/clang/test/VFS/implicit-include.c
@@ -1,6 +1,6 @@
// RUN: sed -e "s@INPUT_DIR@%{/S:regex_replacement}/Inputs@g" -e "s@OUT_DIR@%{/t:regex_replacement}@g" %S/Inputs/vfsoverlay.yaml > %t.yaml
// RUN: %clang_cc1 -Werror -ivfsoverlay %t.yaml -I %t -include "not_real.h" -fsyntax-only %s
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/VFS/include-mixed-real-and-virtual.c b/clang/test/VFS/include-mixed-real-and-virtual.c
index b46ee9af9990..0999d8562e8e 100644
--- a/clang/test/VFS/include-mixed-real-and-virtual.c
+++ b/clang/test/VFS/include-mixed-real-and-virtual.c
@@ -7,7 +7,7 @@
#include "not_real.h"
#include "real.h"
-void foo() {
+void foo(void) {
bar();
baz();
}
diff --git a/clang/test/VFS/include-real-from-virtual.c b/clang/test/VFS/include-real-from-virtual.c
index 7398be735c5f..4344619e09e1 100644
--- a/clang/test/VFS/include-real-from-virtual.c
+++ b/clang/test/VFS/include-real-from-virtual.c
@@ -6,6 +6,6 @@
#include "include_real.h"
-void foo() {
+void foo(void) {
baz();
}
diff --git a/clang/test/VFS/include-virtual-from-real.c b/clang/test/VFS/include-virtual-from-real.c
index b50d5b729253..200afe9d0813 100644
--- a/clang/test/VFS/include-virtual-from-real.c
+++ b/clang/test/VFS/include-virtual-from-real.c
@@ -6,6 +6,6 @@
#include "include_not_real.h"
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/VFS/include.c b/clang/test/VFS/include.c
index a55e73a38178..88d221798f7d 100644
--- a/clang/test/VFS/include.c
+++ b/clang/test/VFS/include.c
@@ -3,6 +3,6 @@
#include "not_real.h"
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/VFS/module-import.m b/clang/test/VFS/module-import.m
index 25d37bbf0a77..e3f250c55ff9 100644
--- a/clang/test/VFS/module-import.m
+++ b/clang/test/VFS/module-import.m
@@ -4,7 +4,7 @@
@import not_real;
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/VFS/relative-path.c b/clang/test/VFS/relative-path.c
index 24313affc69d..ab207d096848 100644
--- a/clang/test/VFS/relative-path.c
+++ b/clang/test/VFS/relative-path.c
@@ -5,6 +5,6 @@
#include "not_real.h"
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/VFS/vfsroot-with-overlay.c b/clang/test/VFS/vfsroot-with-overlay.c
index d181f4d8382c..1a44c523aaac 100644
--- a/clang/test/VFS/vfsroot-with-overlay.c
+++ b/clang/test/VFS/vfsroot-with-overlay.c
@@ -6,6 +6,6 @@
#include "not_real.h"
-void foo() {
+void foo(void) {
bar();
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/check-globals.c b/clang/test/utils/update_cc_test_checks/Inputs/check-globals.c
index a63cec246e46..fae086256ce2 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/check-globals.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/check-globals.c
@@ -2,9 +2,9 @@
// RUN: true
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s | FileCheck %s
-void foo() {
+void foo(void) {
static int i, j;
}
-void bar() {
+void bar(void) {
static int i, j;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c
index 7fc539347e6c..f1c95c445c2f 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c
@@ -5,7 +5,7 @@ int foo(int arg);
void empty_function(void);
-int main() {
+int main(void) {
empty_function();
return foo(1);
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected
index 135f3bc28dee..fd9bcde01ac7 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/def-and-decl.c.expected
@@ -14,7 +14,7 @@ void empty_function(void);
// CHECK-NEXT: [[CALL:%.*]] = call i32 @foo(i32 noundef 1)
// CHECK-NEXT: ret i32 [[CALL]]
//
-int main() {
+int main(void) {
empty_function();
return foo(1);
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c
index 39e7a207f846..7440ef6592b6 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c
@@ -1,6 +1,6 @@
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck %s
-void __test_offloading_42_abcdef_bar_l123();
+void __test_offloading_42_abcdef_bar_l123(void);
void use(int);
void foo(int a)
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected
index 03f68ef0bcd1..06aff0d9d5ac 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs-regex.c.expected
@@ -1,7 +1,7 @@
// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --include-generated-funcs --replace-value-regex "__([a-z]+)_offloading_[a-z0-9]+_[a-z0-9]+_(.*)_l[0-9]+" "somevar_[a-z0-9]+_"
// RUN: %clang_cc1 -triple x86_64-unknown-linux-gnu -fopenmp %s -emit-llvm -o - | FileCheck %s
-void __test_offloading_42_abcdef_bar_l123();
+void __test_offloading_42_abcdef_bar_l123(void);
void use(int);
void foo(int a)
@@ -23,7 +23,7 @@ void foo(int a)
// CHECK-NEXT: store i32 [[TMP0]], i32* [[CONV]], align 4
// CHECK-NEXT: [[TMP1:%.*]] = load i64, i64* [[A_CASTED]], align 8
// CHECK-NEXT: call void @{{__omp_offloading_[a-z0-9]+_[a-z0-9]+_foo_l[0-9]+}}(i64 [[TMP1]]) #[[ATTR3:[0-9]+]]
-// CHECK-NEXT: call void (...) @{{__test_offloading_[a-z0-9]+_[a-z0-9]+_bar_l[0-9]+}}()
+// CHECK-NEXT: call void @{{__test_offloading_[a-z0-9]+_[a-z0-9]+_bar_l[0-9]+}}()
// CHECK-NEXT: ret void
//
//
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c
index 42b9f4fffaa1..c40759aeaacf 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c
@@ -8,7 +8,7 @@ double A[size];
void foo(void);
-int main() {
+int main(void) {
int i = 0;
#pragma omp parallel for
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected
index 9ca1232ea827..f8b50470ee20 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.generated.expected
@@ -9,7 +9,7 @@ double A[size];
void foo(void);
-int main() {
+int main(void) {
int i = 0;
#pragma omp parallel for
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected
index da066b422dbb..2c7556208c83 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/generated-funcs.c.no-generated.expected
@@ -46,7 +46,7 @@ void foo(void);
// NOOMP-NEXT: call void @foo()
// NOOMP-NEXT: ret i32 0
//
-int main() {
+int main(void) {
int i = 0;
#pragma omp parallel for
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c b/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c
index ad4c109c45ec..ed5735e01706 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c
@@ -1,10 +1,10 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s | FileCheck %s
-void foo() {
+void foo(void) {
static int hex = 0x10;
static int dec = 10;
}
-void bar() {
+void bar(void) {
static int hex = 0x20;
static int dec = 20;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c.expected
index 3018d0261adf..f494971a6a88 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/global-hex-value-regex.c.expected
@@ -11,7 +11,7 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
-void foo() {
+void foo(void) {
static int hex = 0x10;
static int dec = 10;
}
@@ -19,7 +19,7 @@ void foo() {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
-void bar() {
+void bar(void) {
static int hex = 0x20;
static int dec = 20;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c b/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c
index cacaac63b036..a8cf554ecb2b 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c
@@ -1,8 +1,8 @@
// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s | FileCheck %s
-void foo() {
+void foo(void) {
static int i, j;
}
-void bar() {
+void bar(void) {
static int i, j;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c.expected
index b0f74c538198..94e08ce9553b 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/global-value-regex.c.expected
@@ -9,13 +9,13 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
-void foo() {
+void foo(void) {
static int i, j;
}
// CHECK-LABEL: @bar(
// CHECK-NEXT: entry:
// CHECK-NEXT: ret void
//
-void bar() {
+void bar(void) {
static int i, j;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c b/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c
index 8956e6b52a21..f05ba41dec1f 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c
+++ b/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c
@@ -1,12 +1,12 @@
// RUN: %clang_cc1 -triple=x86_64-unknown-linux-gnu -emit-llvm -o - %s | FileCheck %s
-int checks_please() {
+int checks_please(void) {
return 1;
}
// UTC_ARGS: --disable
-int no_checks_please() {
+int no_checks_please(void) {
// Manual CHECK line should be retained:
// CHECK: manual check line
return -1;
@@ -15,6 +15,6 @@ int no_checks_please() {
// UTC_ARGS: --enable
-int checks_again() {
+int checks_again(void) {
return 2;
}
diff --git a/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c.expected b/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c.expected
index cb7846c7b3d5..4688ec932b59 100644
--- a/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c.expected
+++ b/clang/test/utils/update_cc_test_checks/Inputs/on_the_fly_arg_change.c.expected
@@ -5,13 +5,13 @@
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 1
//
-int checks_please() {
+int checks_please(void) {
return 1;
}
// UTC_ARGS: --disable
-int no_checks_please() {
+int no_checks_please(void) {
// Manual CHECK line should be retained:
// CHECK: manual check line
return -1;
@@ -24,6 +24,6 @@ int no_checks_please() {
// CHECK-NEXT: entry:
// CHECK-NEXT: ret i32 2
//
-int checks_again() {
+int checks_again(void) {
return 2;
}
diff --git a/clang/test/utils/update_cc_test_checks/check-globals.test b/clang/test/utils/update_cc_test_checks/check-globals.test
index def1a8e93672..9a2e0cca4c4c 100644
--- a/clang/test/utils/update_cc_test_checks/check-globals.test
+++ b/clang/test/utils/update_cc_test_checks/check-globals.test
@@ -44,10 +44,10 @@ END.
BOTH-NEXT:// RUN: true
BOTH-NEXT:// RUN: %clang_cc1 -triple x86_64-pc-linux-gnu -emit-llvm -o - %s | FileCheck %s
BOTH-EMPTY:
- IGF-NEXT:void foo() {
+ IGF-NEXT:void foo(void) {
IGF-NEXT: static int i, j;
IGF-NEXT:}
- IGF-NEXT:void bar() {
+ IGF-NEXT:void bar(void) {
IGF-NEXT: static int i, j;
IGF-NEXT:}
BOTH-NEXT://.
@@ -60,7 +60,7 @@ BOTH-EMPTY:
BOTH-NEXT:// CHECK-NEXT: entry:
BOTH-NEXT:// CHECK-NEXT: ret void
BOTH-NEXT://
- NRM-NEXT:void foo() {
+ NRM-NEXT:void foo(void) {
NRM-NEXT: static int i, j;
NRM-NEXT:}
IGF-NEXT://
@@ -68,7 +68,7 @@ BOTH-EMPTY:
BOTH-NEXT:// CHECK-NEXT: entry:
BOTH-NEXT:// CHECK-NEXT: ret void
BOTH-NEXT://
- NRM-NEXT:void bar() {
+ NRM-NEXT:void bar(void) {
NRM-NEXT: static int i, j;
NRM-NEXT:}
BOTH-NEXT://.
diff --git a/clang/unittests/Format/FormatTest.cpp b/clang/unittests/Format/FormatTest.cpp
index 06b2fe5c650c..0d315734bc95 100644
--- a/clang/unittests/Format/FormatTest.cpp
+++ b/clang/unittests/Format/FormatTest.cpp
@@ -3826,7 +3826,10 @@ TEST_F(FormatTest, FormatsNamespaces) {
"struct b_struct {};\n"
"} // namespace B\n",
Style);
- verifyFormat("template <int I> constexpr void foo requires(I == 42) {}\n"
+ verifyFormat("template <int I>\n"
+ "constexpr void foo()\n"
+ " requires(I == 42)\n"
+ "{}\n"
"namespace ns {\n"
"void foo() {}\n"
"} // namespace ns\n",
@@ -19254,7 +19257,6 @@ TEST_F(FormatTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(BinPackArguments);
CHECK_PARSE_BOOL(BinPackParameters);
CHECK_PARSE_BOOL(BreakAfterJavaFieldAnnotations);
- CHECK_PARSE_BOOL(BreakBeforeConceptDeclarations);
CHECK_PARSE_BOOL(BreakBeforeTernaryOperators);
CHECK_PARSE_BOOL(BreakStringLiterals);
CHECK_PARSE_BOOL(CompactNamespaces);
@@ -19266,7 +19268,8 @@ TEST_F(FormatTest, ParsesConfigurationBools) {
CHECK_PARSE_BOOL(IndentCaseLabels);
CHECK_PARSE_BOOL(IndentCaseBlocks);
CHECK_PARSE_BOOL(IndentGotoLabels);
- CHECK_PARSE_BOOL(IndentRequires);
+ CHECK_PARSE_BOOL_FIELD(IndentRequiresClause, "IndentRequires");
+ CHECK_PARSE_BOOL(IndentRequiresClause);
CHECK_PARSE_BOOL(IndentWrappedFunctionNames);
CHECK_PARSE_BOOL(KeepEmptyLinesAtTheStartOfBlocks);
CHECK_PARSE_BOOL(ObjCSpaceAfterProperty);
@@ -19932,6 +19935,27 @@ TEST_F(FormatTest, ParsesConfiguration) {
// For backward compatibility:
CHECK_PARSE("SpacesInAngles: false", SpacesInAngles, FormatStyle::SIAS_Never);
CHECK_PARSE("SpacesInAngles: true", SpacesInAngles, FormatStyle::SIAS_Always);
+
+ CHECK_PARSE("RequiresClausePosition: WithPreceding", RequiresClausePosition,
+ FormatStyle::RCPS_WithPreceding);
+ CHECK_PARSE("RequiresClausePosition: WithFollowing", RequiresClausePosition,
+ FormatStyle::RCPS_WithFollowing);
+ CHECK_PARSE("RequiresClausePosition: SingleLine", RequiresClausePosition,
+ FormatStyle::RCPS_SingleLine);
+ CHECK_PARSE("RequiresClausePosition: OwnLine", RequiresClausePosition,
+ FormatStyle::RCPS_OwnLine);
+
+ CHECK_PARSE("BreakBeforeConceptDeclarations: Never",
+ BreakBeforeConceptDeclarations, FormatStyle::BBCDS_Never);
+ CHECK_PARSE("BreakBeforeConceptDeclarations: Always",
+ BreakBeforeConceptDeclarations, FormatStyle::BBCDS_Always);
+ CHECK_PARSE("BreakBeforeConceptDeclarations: Allowed",
+ BreakBeforeConceptDeclarations, FormatStyle::BBCDS_Allowed);
+ // For backward compatibility:
+ CHECK_PARSE("BreakBeforeConceptDeclarations: true",
+ BreakBeforeConceptDeclarations, FormatStyle::BBCDS_Always);
+ CHECK_PARSE("BreakBeforeConceptDeclarations: false",
+ BreakBeforeConceptDeclarations, FormatStyle::BBCDS_Allowed);
}
TEST_F(FormatTest, ParsesConfigurationWithLanguages) {
@@ -23200,275 +23224,584 @@ TEST_F(FormatTest, WebKitDefaultStyle) {
Style);
}
-TEST_F(FormatTest, ConceptsAndRequires) {
- FormatStyle Style = getLLVMStyle();
- Style.AllowShortFunctionsOnASingleLine = FormatStyle::SFS_None;
+TEST_F(FormatTest, Concepts) {
+ EXPECT_EQ(getLLVMStyle().BreakBeforeConceptDeclarations,
+ FormatStyle::BBCDS_Always);
+ verifyFormat("template <typename T>\n"
+ "concept True = true;");
verifyFormat("template <typename T>\n"
- "concept Hashable = requires(T a) {\n"
- " { std::hash<T>{}(a) } -> std::convertible_to<std::size_t>;\n"
- "};",
- Style);
+ "concept C = ((false || foo()) && C2<T>) ||\n"
+ " (std::trait<T>::value && Baz) || sizeof(T) >= 6;",
+ getLLVMStyleWithColumns(60));
+
verifyFormat("template <typename T>\n"
- "concept EqualityComparable = requires(T a, T b) {\n"
- " { a == b } -> bool;\n"
- "};",
- Style);
+ "concept DelayedCheck = true && requires(T t) { t.bar(); } && "
+ "sizeof(T) <= 8;");
+
verifyFormat("template <typename T>\n"
- "concept EqualityComparable = requires(T a, T b) {\n"
- " { a == b } -> bool;\n"
- " { a != b } -> bool;\n"
- "};",
- Style);
+ "concept DelayedCheck = true && requires(T t) {\n"
+ " t.bar();\n"
+ " t.baz();\n"
+ " } && sizeof(T) <= 8;");
+
verifyFormat("template <typename T>\n"
- "concept EqualityComparable = requires(T a, T b) {\n"
- " { a == b } -> bool;\n"
- " { a != b } -> bool;\n"
- "};",
- Style);
+ "concept DelayedCheck = true && requires(T t) { // Comment\n"
+ " t.bar();\n"
+ " t.baz();\n"
+ " } && sizeof(T) <= 8;");
- verifyFormat("template <typename It>\n"
- "requires Iterator<It>\n"
- "void sort(It begin, It end) {\n"
- " //....\n"
- "}",
- Style);
+ verifyFormat("template <typename T>\n"
+ "concept DelayedCheck = false || requires(T t) { t.bar(); } && "
+ "sizeof(T) <= 8;");
verifyFormat("template <typename T>\n"
- "concept Large = sizeof(T) > 10;",
- Style);
+ "concept DelayedCheck = !!false || requires(T t) { t.bar(); } "
+ "&& sizeof(T) <= 8;");
+
+ verifyFormat(
+ "template <typename T>\n"
+ "concept DelayedCheck = static_cast<bool>(0) ||\n"
+ " requires(T t) { t.bar(); } && sizeof(T) <= 8;");
- verifyFormat("template <typename T, typename U>\n"
- "concept FooableWith = requires(T t, U u) {\n"
- " typename T::foo_type;\n"
- " { t.foo(u) } -> typename T::foo_type;\n"
- " t++;\n"
- "};\n"
- "void doFoo(FooableWith<int> auto t) {\n"
- " t.foo(3);\n"
- "}",
- Style);
verifyFormat("template <typename T>\n"
- "concept Context = sizeof(T) == 1;",
- Style);
+ "concept DelayedCheck = bool(0) || requires(T t) { t.bar(); } "
+ "&& sizeof(T) <= 8;");
+
+ verifyFormat(
+ "template <typename T>\n"
+ "concept DelayedCheck = (bool)(0) ||\n"
+ " requires(T t) { t.bar(); } && sizeof(T) <= 8;");
+
verifyFormat("template <typename T>\n"
- "concept Context = is_specialization_of_v<context, T>;",
- Style);
+ "concept DelayedCheck = (bool)0 || requires(T t) { t.bar(); } "
+ "&& sizeof(T) <= 8;");
+
verifyFormat("template <typename T>\n"
- "concept Node = std::is_object_v<T>;",
- Style);
+ "concept Size = sizeof(T) >= 5 && requires(T t) { t.bar(); } && "
+ "sizeof(T) <= 8;");
+
verifyFormat("template <typename T>\n"
- "concept Tree = true;",
- Style);
+ "concept Size = 2 < 5 && 2 <= 5 && 8 >= 5 && 8 > 5 &&\n"
+ " requires(T t) {\n"
+ " t.bar();\n"
+ " t.baz();\n"
+ " } && sizeof(T) <= 8 && !(4 < 3);",
+ getLLVMStyleWithColumns(60));
- verifyFormat("template <typename T> int g(T i) requires Concept1<I> {\n"
- " //...\n"
- "}",
- Style);
+ verifyFormat("template <typename T>\n"
+ "concept TrueOrNot = IsAlwaysTrue || IsNeverTrue;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = foo();");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = foo(T());");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = foo(T{});");
+
+ verifyFormat("template <typename T>\n"
+ "concept Size = V<sizeof(T)>::Value > 5;");
+
+ verifyFormat("template <typename T>\n"
+ "concept True = S<T>::Value;");
verifyFormat(
- "template <typename T> int g(T i) requires Concept1<I> && Concept2<I> {\n"
- " //...\n"
- "}",
- Style);
+ "template <typename T>\n"
+ "concept C = []() { return true; }() && requires(T t) { t.bar(); } &&\n"
+ " sizeof(T) <= 8;");
+
+ // FIXME: This is misformatted because the fake l paren starts at bool, not at
+ // the lambda l square.
+ verifyFormat("template <typename T>\n"
+ "concept C = [] -> bool { return true; }() && requires(T t) { "
+ "t.bar(); } &&\n"
+ " sizeof(T) <= 8;");
verifyFormat(
- "template <typename T> int g(T i) requires Concept1<I> || Concept2<I> {\n"
- " //...\n"
- "}",
- Style);
+ "template <typename T>\n"
+ "concept C = decltype([]() { return std::true_type{}; }())::value &&\n"
+ " requires(T t) { t.bar(); } && sizeof(T) <= 8;");
verifyFormat("template <typename T>\n"
- "veryveryvery_long_return_type g(T i) requires Concept1<I> || "
- "Concept2<I> {\n"
- " //...\n"
- "}",
- Style);
+ "concept C = decltype([]() { return std::true_type{}; "
+ "}())::value && requires(T t) { t.bar(); } && sizeof(T) <= 8;",
+ getLLVMStyleWithColumns(120));
verifyFormat("template <typename T>\n"
- "veryveryvery_long_return_type g(T i) requires Concept1<I> && "
- "Concept2<I> {\n"
- " //...\n"
- "}",
- Style);
+ "concept C = decltype([]() -> std::true_type { return {}; "
+ "}())::value &&\n"
+ " requires(T t) { t.bar(); } && sizeof(T) <= 8;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = true;\n"
+ "Foo Bar;");
+
+ verifyFormat("template <typename T>\n"
+ "concept Hashable = requires(T a) {\n"
+ " { std::hash<T>{}(a) } -> "
+ "std::convertible_to<std::size_t>;\n"
+ " };");
verifyFormat(
"template <typename T>\n"
- "veryveryvery_long_return_type g(T i) requires Concept1 && Concept2 {\n"
- " //...\n"
- "}",
- Style);
+ "concept EqualityComparable = requires(T a, T b) {\n"
+ " { a == b } -> std::same_as<bool>;\n"
+ " };");
verifyFormat(
"template <typename T>\n"
- "veryveryvery_long_return_type g(T i) requires Concept1 || Concept2 {\n"
- " //...\n"
- "}",
- Style);
+ "concept EqualityComparable = requires(T a, T b) {\n"
+ " { a == b } -> std::same_as<bool>;\n"
+ " { a != b } -> std::same_as<bool>;\n"
+ " };");
- verifyFormat("template <typename It>\n"
- "requires Foo<It>() && Bar<It> {\n"
- " //....\n"
- "}",
- Style);
+ verifyFormat("template <typename T>\n"
+ "concept WeakEqualityComparable = requires(T a, T b) {\n"
+ " { a == b };\n"
+ " { a != b };\n"
+ " };");
- verifyFormat("template <typename It>\n"
- "requires Foo<Bar<It>>() && Bar<Foo<It, It>> {\n"
- " //....\n"
- "}",
- Style);
+ verifyFormat("template <typename T>\n"
+ "concept HasSizeT = requires { typename T::size_t; };");
- verifyFormat("template <typename It>\n"
- "requires Foo<Bar<It, It>>() && Bar<Foo<It, It>> {\n"
- " //....\n"
- "}",
- Style);
+ verifyFormat("template <typename T>\n"
+ "concept Semiregular =\n"
+ " DefaultConstructible<T> && CopyConstructible<T> && "
+ "CopyAssignable<T> &&\n"
+ " requires(T a, std::size_t n) {\n"
+ " requires Same<T *, decltype(&a)>;\n"
+ " { a.~T() } noexcept;\n"
+ " requires Same<T *, decltype(new T)>;\n"
+ " requires Same<T *, decltype(new T[n])>;\n"
+ " { delete new T; };\n"
+ " { delete new T[n]; };\n"
+ " };");
+
+ verifyFormat("template <typename T>\n"
+ "concept Semiregular =\n"
+ " requires(T a, std::size_t n) {\n"
+ " requires Same<T *, decltype(&a)>;\n"
+ " { a.~T() } noexcept;\n"
+ " requires Same<T *, decltype(new T)>;\n"
+ " requires Same<T *, decltype(new T[n])>;\n"
+ " { delete new T; };\n"
+ " { delete new T[n]; };\n"
+ " { new T } -> std::same_as<T *>;\n"
+ " } && DefaultConstructible<T> && CopyConstructible<T> && "
+ "CopyAssignable<T>;");
verifyFormat(
- "template <typename It>\n"
- "requires Foo<Bar<It>, Baz<It>>() && Bar<Foo<It>, Baz<It, It>> {\n"
- " //....\n"
- "}",
+ "template <typename T>\n"
+ "concept Semiregular =\n"
+ " DefaultConstructible<T> && requires(T a, std::size_t n) {\n"
+ " requires Same<T *, decltype(&a)>;\n"
+ " { a.~T() } noexcept;\n"
+ " requires Same<T *, decltype(new T)>;\n"
+ " requires Same<T *, decltype(new "
+ "T[n])>;\n"
+ " { delete new T; };\n"
+ " { delete new T[n]; };\n"
+ " } && CopyConstructible<T> && "
+ "CopyAssignable<T>;");
+
+ verifyFormat("template <typename T>\n"
+ "concept Two = requires(T t) {\n"
+ " { t.foo() } -> std::same_as<Bar>;\n"
+ " } && requires(T &&t) {\n"
+ " { t.foo() } -> std::same_as<Bar &&>;\n"
+ " };");
+
+ verifyFormat(
+ "template <typename T>\n"
+ "concept C = requires(T x) {\n"
+ " { *x } -> std::convertible_to<typename T::inner>;\n"
+ " { x + 1 } noexcept -> std::same_as<int>;\n"
+ " { x * 1 } -> std::convertible_to<T>;\n"
+ " };");
+
+ verifyFormat(
+ "template <typename T, typename U = T>\n"
+ "concept Swappable = requires(T &&t, U &&u) {\n"
+ " swap(std::forward<T>(t), std::forward<U>(u));\n"
+ " swap(std::forward<U>(u), std::forward<T>(t));\n"
+ " };");
+
+ verifyFormat("template <typename T, typename U>\n"
+ "concept Common = requires(T &&t, U &&u) {\n"
+ " typename CommonType<T, U>;\n"
+ " { CommonType<T, U>(std::forward<T>(t)) };\n"
+ " };");
+
+ verifyFormat("template <typename T, typename U>\n"
+ "concept Common = requires(T &&t, U &&u) {\n"
+ " typename CommonType<T, U>;\n"
+ " { CommonType<T, U>{std::forward<T>(t)} };\n"
+ " };");
+
+ verifyFormat(
+ "template <typename T>\n"
+ "concept C = requires(T t) {\n"
+ " requires Bar<T> && Foo<T>;\n"
+ " requires((trait<T> && Baz) || (T2<T> && Foo<T>));\n"
+ " };");
+
+ verifyFormat("template <typename T>\n"
+ "concept HasFoo = requires(T t) {\n"
+ " { t.foo() };\n"
+ " t.foo();\n"
+ " };\n"
+ "template <typename T>\n"
+ "concept HasBar = requires(T t) {\n"
+ " { t.bar() };\n"
+ " t.bar();\n"
+ " };");
+
+ verifyFormat("template <typename T>\n"
+ "concept Large = sizeof(T) > 10;");
+
+ verifyFormat("template <typename T, typename U>\n"
+ "concept FooableWith = requires(T t, U u) {\n"
+ " typename T::foo_type;\n"
+ " { t.foo(u) } -> typename T::foo_type;\n"
+ " t++;\n"
+ " };\n"
+ "void doFoo(FooableWith<int> auto t) { t.foo(3); }");
+
+ verifyFormat("template <typename T>\n"
+ "concept Context = is_specialization_of_v<context, T>;");
+
+ verifyFormat("template <typename T>\n"
+ "concept Node = std::is_object_v<T>;");
+
+ auto Style = getLLVMStyle();
+ Style.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Allowed;
+
+ verifyFormat(
+ "template <typename T>\n"
+ "concept C = requires(T t) {\n"
+ " requires Bar<T> && Foo<T>;\n"
+ " requires((trait<T> && Baz) || (T2<T> && Foo<T>));\n"
+ " };",
Style);
- Style.IndentRequires = true;
- verifyFormat("template <typename It>\n"
- " requires Iterator<It>\n"
- "void sort(It begin, It end) {\n"
- " //....\n"
- "}",
+ verifyFormat("template <typename T>\n"
+ "concept HasFoo = requires(T t) {\n"
+ " { t.foo() };\n"
+ " t.foo();\n"
+ " };\n"
+ "template <typename T>\n"
+ "concept HasBar = requires(T t) {\n"
+ " { t.bar() };\n"
+ " t.bar();\n"
+ " };",
Style);
- verifyFormat("template <std::size index_>\n"
- " requires(index_ < sizeof...(Children_))\n"
- "Tree auto &child() {\n"
- " // ...\n"
- "}",
+
+ verifyFormat("template <typename T> concept True = true;", Style);
+
+ verifyFormat("template <typename T>\n"
+ "concept C = decltype([]() -> std::true_type { return {}; "
+ "}())::value &&\n"
+ " requires(T t) { t.bar(); } && sizeof(T) <= 8;",
Style);
- Style.SpaceBeforeParens = FormatStyle::SBPO_Always;
verifyFormat("template <typename T>\n"
- "concept Hashable = requires (T a) {\n"
- " { std::hash<T>{}(a) } -> std::convertible_to<std::size_t>;\n"
- "};",
+ "concept Semiregular =\n"
+ " DefaultConstructible<T> && CopyConstructible<T> && "
+ "CopyAssignable<T> &&\n"
+ " requires(T a, std::size_t n) {\n"
+ " requires Same<T *, decltype(&a)>;\n"
+ " { a.~T() } noexcept;\n"
+ " requires Same<T *, decltype(new T)>;\n"
+ " requires Same<T *, decltype(new T[n])>;\n"
+ " { delete new T; };\n"
+ " { delete new T[n]; };\n"
+ " };",
Style);
- verifyFormat("template <class T = void>\n"
- " requires EqualityComparable<T> || Same<T, void>\n"
- "struct equal_to;",
+ Style.BreakBeforeConceptDeclarations = FormatStyle::BBCDS_Never;
+
+ verifyFormat("template <typename T> concept C =\n"
+ " requires(T t) {\n"
+ " requires Bar<T> && Foo<T>;\n"
+ " requires((trait<T> && Baz) || (T2<T> && Foo<T>));\n"
+ " };",
Style);
- verifyFormat("template <class T>\n"
- " requires requires {\n"
- " T{};\n"
- " T (int);\n"
- " }\n",
+ verifyFormat("template <typename T> concept HasFoo = requires(T t) {\n"
+ " { t.foo() };\n"
+ " t.foo();\n"
+ " };\n"
+ "template <typename T> concept HasBar = requires(T t) {\n"
+ " { t.bar() };\n"
+ " t.bar();\n"
+ " };",
Style);
- Style.ColumnLimit = 78;
+ verifyFormat("template <typename T> concept True = true;", Style);
+
+ verifyFormat(
+ "template <typename T> concept C = decltype([]() -> std::true_type {\n"
+ " return {};\n"
+ " }())::value\n"
+ " && requires(T t) { t.bar(); } &&\n"
+ " sizeof(T) <= 8;",
+ Style);
+
+ verifyFormat("template <typename T> concept Semiregular =\n"
+ " DefaultConstructible<T> && CopyConstructible<T> && "
+ "CopyAssignable<T> &&\n"
+ " requires(T a, std::size_t n) {\n"
+ " requires Same<T *, decltype(&a)>;\n"
+ " { a.~T() } noexcept;\n"
+ " requires Same<T *, decltype(new T)>;\n"
+ " requires Same<T *, decltype(new T[n])>;\n"
+ " { delete new T; };\n"
+ " { delete new T[n]; };\n"
+ " };",
+ Style);
+
+ // The following tests are invalid C++, we just want to make sure we don't
+ // assert.
verifyFormat("template <typename T>\n"
- "concept Context = Traits<typename T::traits_type> and\n"
- " Interface<typename T::interface_type> and\n"
- " Request<typename T::request_type> and\n"
- " Response<typename T::response_type> and\n"
- " ContextExtension<typename T::extension_type> and\n"
- " ::std::is_copy_constructable<T> and "
- "::std::is_move_constructable<T> and\n"
- " requires (T c) {\n"
- " { c.response; } -> Response;\n"
- "} and requires (T c) {\n"
- " { c.request; } -> Request;\n"
- "}\n",
+ "concept C = requires C2<T>;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = 5 + 4;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C =\n"
+ "class X;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = [] && true;");
+
+ verifyFormat("template <typename T>\n"
+ "concept C = [] && requires(T t) { typename T::size_type; };");
+}
+
+TEST_F(FormatTest, RequiresClauses) {
+ auto Style = getLLVMStyle();
+ EXPECT_EQ(Style.RequiresClausePosition, FormatStyle::RCPS_OwnLine);
+ EXPECT_EQ(Style.IndentRequiresClause, true);
+
+ verifyFormat("template <typename T>\n"
+ " requires(Foo<T> && std::trait<T>)\n"
+ "struct Bar;",
Style);
verifyFormat("template <typename T>\n"
- "concept Context = Traits<typename T::traits_type> or\n"
- " Interface<typename T::interface_type> or\n"
- " Request<typename T::request_type> or\n"
- " Response<typename T::response_type> or\n"
- " ContextExtension<typename T::extension_type> or\n"
- " ::std::is_copy_constructable<T> or "
- "::std::is_move_constructable<T> or\n"
- " requires (T c) {\n"
- " { c.response; } -> Response;\n"
- "} or requires (T c) {\n"
- " { c.request; } -> Request;\n"
- "}\n",
+ " requires(Foo<T> && std::trait<T>)\n"
+ "class Bar {\n"
+ "public:\n"
+ " Bar(T t);\n"
+ " bool baz();\n"
+ "};",
Style);
+ verifyFormat(
+ "template <typename T>\n"
+ " requires requires(T &&t) {\n"
+ " typename T::I;\n"
+ " requires(F<typename T::I> && std::trait<typename T::I>);\n"
+ " }\n"
+ "Bar(T) -> Bar<typename T::I>;",
+ Style);
+
verifyFormat("template <typename T>\n"
- "concept Context = Traits<typename T::traits_type> &&\n"
- " Interface<typename T::interface_type> &&\n"
- " Request<typename T::request_type> &&\n"
- " Response<typename T::response_type> &&\n"
- " ContextExtension<typename T::extension_type> &&\n"
- " ::std::is_copy_constructable<T> && "
- "::std::is_move_constructable<T> &&\n"
- " requires (T c) {\n"
- " { c.response; } -> Response;\n"
- "} && requires (T c) {\n"
- " { c.request; } -> Request;\n"
- "}\n",
+ " requires(Foo<T> && std::trait<T>)\n"
+ "constexpr T MyGlobal;",
Style);
- verifyFormat("template <typename T>\nconcept someConcept = Constraint1<T> && "
- "Constraint2<T>;");
+ verifyFormat("template <typename T>\n"
+ " requires Foo<T> && requires(T t) {\n"
+ " { t.baz() } -> std::same_as<bool>;\n"
+ " requires std::same_as<T::Factor, int>;\n"
+ " }\n"
+ "inline int bar(T t) {\n"
+ " return t.baz() ? T::Factor : 5;\n"
+ "}",
+ Style);
- Style.BreakBeforeBraces = FormatStyle::BS_Custom;
- Style.BraceWrapping.AfterFunction = true;
- Style.BraceWrapping.AfterClass = true;
- Style.AlwaysBreakTemplateDeclarations = FormatStyle::BTDS_Yes;
- Style.BreakConstructorInitializers = FormatStyle::BCIS_BeforeColon;
- verifyFormat("void Foo () requires (std::copyable<T>)\n"
+ verifyFormat("template <typename T>\n"
+ "inline int bar(T t)\n"
+ " requires Foo<T> && requires(T t) {\n"
+ " { t.baz() } -> std::same_as<bool>;\n"
+ " requires std::same_as<T::Factor, int>;\n"
+ " }\n"
"{\n"
- " return\n"
- "}\n",
+ " return t.baz() ? T::Factor : 5;\n"
+ "}",
Style);
- verifyFormat("void Foo () requires std::copyable<T>\n"
- "{\n"
- " return\n"
- "}\n",
+ verifyFormat("template <typename T>\n"
+ " requires F<T>\n"
+ "int bar(T t) {\n"
+ " return 5;\n"
+ "}",
Style);
- verifyFormat("template <std::semiregular F, std::semiregular... Args>\n"
- " requires (std::invocable<F, std::invoke_result_t<Args>...>)\n"
- "struct constant;",
+ verifyFormat("template <typename T>\n"
+ "int bar(T t)\n"
+ " requires F<T>\n"
+ "{\n"
+ " return 5;\n"
+ "}",
Style);
- verifyFormat("template <std::semiregular F, std::semiregular... Args>\n"
- " requires std::invocable<F, std::invoke_result_t<Args>...>\n"
- "struct constant;",
+ Style.IndentRequiresClause = false;
+ verifyFormat("template <typename T>\n"
+ "requires F<T>\n"
+ "int bar(T t) {\n"
+ " return 5;\n"
+ "}",
Style);
- verifyFormat("template <class T>\n"
- "class plane_with_very_very_very_long_name\n"
+ verifyFormat("template <typename T>\n"
+ "int bar(T t)\n"
+ "requires F<T>\n"
"{\n"
- " constexpr plane_with_very_very_very_long_name () requires "
- "std::copyable<T>\n"
- " : plane_with_very_very_very_long_name (1)\n"
- " {\n"
- " }\n"
- "}\n",
+ " return 5;\n"
+ "}",
Style);
- verifyFormat("template <class T>\n"
- "class plane_with_long_name\n"
- "{\n"
- " constexpr plane_with_long_name () requires std::copyable<T>\n"
- " : plane_with_long_name (1)\n"
- " {\n"
- " }\n"
- "}\n",
+ Style.RequiresClausePosition = FormatStyle::RCPS_SingleLine;
+ verifyFormat("template <typename T> requires Foo<T> struct Bar {};\n"
+ "template <typename T> requires Foo<T> void bar() {}\n"
+ "template <typename T> void bar() requires Foo<T> {}\n"
+ "template <typename T> requires Foo<T> Bar(T) -> Bar<T>;",
+ Style);
+
+ auto ColumnStyle = Style;
+ ColumnStyle.ColumnLimit = 40;
+ verifyFormat("template <typename AAAAAAA>\n"
+ "requires Foo<T> struct Bar {};\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<T> void bar() {}\n"
+ "template <typename AAAAAAA>\n"
+ "void bar() requires Foo<T> {}\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<T> Baz(T) -> Baz<T>;",
+ ColumnStyle);
+
+ verifyFormat("template <typename T>\n"
+ "requires Foo<AAAAAAA> struct Bar {};\n"
+ "template <typename T>\n"
+ "requires Foo<AAAAAAA> void bar() {}\n"
+ "template <typename T>\n"
+ "void bar() requires Foo<AAAAAAA> {}\n"
+ "template <typename T>\n"
+ "requires Foo<AAAAAAA> Bar(T) -> Bar<T>;",
+ ColumnStyle);
+
+ verifyFormat("template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "struct Bar {};\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "void bar() {}\n"
+ "template <typename AAAAAAA>\n"
+ "void bar()\n"
+ " requires Foo<AAAAAAAAAAAAAAAA> {}\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAA> Bar(T) -> Bar<T>;\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "Bar(T) -> Bar<T>;",
+ ColumnStyle);
+
+ Style.RequiresClausePosition = FormatStyle::RCPS_WithFollowing;
+ ColumnStyle.RequiresClausePosition = FormatStyle::RCPS_WithFollowing;
+
+ verifyFormat("template <typename T>\n"
+ "requires Foo<T> struct Bar {};\n"
+ "template <typename T>\n"
+ "requires Foo<T> void bar() {}\n"
+ "template <typename T>\n"
+ "void bar()\n"
+ "requires Foo<T> {}\n"
+ "template <typename T>\n"
+ "requires Foo<T> Bar(T) -> Bar<T>;",
Style);
- Style.BreakBeforeConceptDeclarations = false;
- verifyFormat("template <typename T> concept Tree = true;", Style);
+ verifyFormat("template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "struct Bar {};\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "void bar() {}\n"
+ "template <typename AAAAAAA>\n"
+ "void bar()\n"
+ "requires Foo<AAAAAAAAAAAAAAAA> {}\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAA> Bar(T) -> Bar<T>;\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "Bar(T) -> Bar<T>;",
+ ColumnStyle);
- Style.IndentRequires = false;
- verifyFormat("template <std::semiregular F, std::semiregular... Args>\n"
- "requires (std::invocable<F, std::invoke_result_t<Args>...>) "
- "struct constant;",
+ Style.IndentRequiresClause = true;
+ ColumnStyle.IndentRequiresClause = true;
+
+ verifyFormat("template <typename T>\n"
+ " requires Foo<T> struct Bar {};\n"
+ "template <typename T>\n"
+ " requires Foo<T> void bar() {}\n"
+ "template <typename T>\n"
+ "void bar()\n"
+ " requires Foo<T> {}\n"
+ "template <typename T>\n"
+ " requires Foo<T> Bar(T) -> Bar<T>;",
Style);
+
+ verifyFormat("template <typename AAAAAAA>\n"
+ " requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "struct Bar {};\n"
+ "template <typename AAAAAAA>\n"
+ " requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "void bar() {}\n"
+ "template <typename AAAAAAA>\n"
+ "void bar()\n"
+ " requires Foo<AAAAAAAAAAAAAAAA> {}\n"
+ "template <typename AAAAAAA>\n"
+ " requires Foo<AAAAAA> Bar(T) -> Bar<T>;\n"
+ "template <typename AAAAAAA>\n"
+ " requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "Bar(T) -> Bar<T>;",
+ ColumnStyle);
+
+ Style.RequiresClausePosition = FormatStyle::RCPS_WithPreceding;
+ ColumnStyle.RequiresClausePosition = FormatStyle::RCPS_WithPreceding;
+
+ verifyFormat("template <typename T> requires Foo<T>\n"
+ "struct Bar {};\n"
+ "template <typename T> requires Foo<T>\n"
+ "void bar() {}\n"
+ "template <typename T>\n"
+ "void bar() requires Foo<T>\n"
+ "{}\n"
+ "template <typename T> requires Foo<T>\n"
+ "Bar(T) -> Bar<T>;",
+ Style);
+
+ verifyFormat("template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "struct Bar {};\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "void bar() {}\n"
+ "template <typename AAAAAAA>\n"
+ "void bar()\n"
+ " requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "{}\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAA>\n"
+ "Bar(T) -> Bar<T>;\n"
+ "template <typename AAAAAAA>\n"
+ "requires Foo<AAAAAAAAAAAAAAAA>\n"
+ "Bar(T) -> Bar<T>;",
+ ColumnStyle);
}
TEST_F(FormatTest, StatementAttributeLikeMacros) {
diff --git a/clang/unittests/Format/TokenAnnotatorTest.cpp b/clang/unittests/Format/TokenAnnotatorTest.cpp
index acb7386a89df..0d4be8d78853 100644
--- a/clang/unittests/Format/TokenAnnotatorTest.cpp
+++ b/clang/unittests/Format/TokenAnnotatorTest.cpp
@@ -14,6 +14,14 @@
namespace clang {
namespace format {
+
+// Not really the equality, but everything we need.
+static bool operator==(const FormatToken &LHS,
+ const FormatToken &RHS) noexcept {
+ return LHS.Tok.getKind() == RHS.Tok.getKind() &&
+ LHS.getType() == RHS.getType();
+}
+
namespace {
class TokenAnnotatorTest : public ::testing::Test {
@@ -119,6 +127,261 @@ TEST_F(TokenAnnotatorTest, UnderstandsDelete) {
EXPECT_TOKEN(Tokens[7], tok::r_paren, TT_CastRParen);
}
+TEST_F(TokenAnnotatorTest, UnderstandsRequiresClausesAndConcepts) {
+ auto Tokens = annotate("template <typename T>\n"
+ "concept C = (Foo && Bar) && (Bar && Baz);");
+
+ ASSERT_EQ(Tokens.size(), 21u) << Tokens;
+ EXPECT_TOKEN(Tokens[10], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[13], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[16], tok::ampamp, TT_BinaryOperator);
+
+ Tokens = annotate("template <typename T>\n"
+ "concept C = requires(T t) {\n"
+ " { t.foo() };\n"
+ "} && Bar<T> && Baz<T>;");
+ ASSERT_EQ(Tokens.size(), 35u) << Tokens;
+ EXPECT_TOKEN(Tokens[23], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[28], tok::ampamp, TT_BinaryOperator);
+
+ Tokens = annotate("template<typename T>\n"
+ "requires C1<T> && (C21<T> || C22<T> && C2e<T>) && C3<T>\n"
+ "struct Foo;");
+ ASSERT_EQ(Tokens.size(), 36u) << Tokens;
+ EXPECT_TOKEN(Tokens[6], tok::identifier, TT_Unknown);
+ EXPECT_EQ(Tokens[6]->FakeLParens.size(), 1u);
+ EXPECT_TOKEN(Tokens[10], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[16], tok::pipepipe, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[21], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[27], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[31], tok::greater, TT_TemplateCloser);
+ EXPECT_EQ(Tokens[31]->FakeRParens, 1u);
+ EXPECT_TRUE(Tokens[31]->ClosesRequiresClause);
+
+ Tokens =
+ annotate("template<typename T>\n"
+ "requires (C1<T> && (C21<T> || C22<T> && C2e<T>) && C3<T>)\n"
+ "struct Foo;");
+ ASSERT_EQ(Tokens.size(), 38u) << Tokens;
+ EXPECT_TOKEN(Tokens[7], tok::identifier, TT_Unknown);
+ EXPECT_EQ(Tokens[7]->FakeLParens.size(), 1u);
+ EXPECT_TOKEN(Tokens[11], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[17], tok::pipepipe, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[22], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[28], tok::ampamp, TT_BinaryOperator);
+ EXPECT_TOKEN(Tokens[32], tok::greater, TT_TemplateCloser);
+ EXPECT_EQ(Tokens[32]->FakeRParens, 1u);
+ EXPECT_TOKEN(Tokens[33], tok::r_paren, TT_Unknown);
+ EXPECT_TRUE(Tokens[33]->ClosesRequiresClause);
+}
+
+TEST_F(TokenAnnotatorTest, RequiresDoesNotChangeParsingOfTheRest) {
+ auto NumberOfAdditionalRequiresClauseTokens = 5u;
+ auto NumberOfTokensBeforeRequires = 5u;
+
+ auto BaseTokens = annotate("template<typename T>\n"
+ "T Pi = 3.14;");
+ auto ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "T Pi = 3.14;");
+
+ auto NumberOfBaseTokens = 11u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "struct Bar;");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "struct Bar;");
+ NumberOfBaseTokens = 9u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "struct Bar {"
+ " T foo();\n"
+ " T bar();\n"
+ "};");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "struct Bar {"
+ " T foo();\n"
+ " T bar();\n"
+ "};");
+ NumberOfBaseTokens = 21u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "Bar(T) -> Bar<T>;");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "Bar(T) -> Bar<T>;");
+ NumberOfBaseTokens = 16u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "T foo();");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "T foo();");
+ NumberOfBaseTokens = 11u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "T foo() {\n"
+ " auto bar = baz();\n"
+ " return bar + T{};\n"
+ "}");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires Foo<T>\n"
+ "T foo() {\n"
+ " auto bar = baz();\n"
+ " return bar + T{};\n"
+ "}");
+ NumberOfBaseTokens = 26u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "T foo();");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ "T foo() requires Foo<T>;");
+ NumberOfBaseTokens = 11u;
+ NumberOfTokensBeforeRequires = 9u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "T foo() {\n"
+ " auto bar = baz();\n"
+ " return bar + T{};\n"
+ "}");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ "T foo() requires Foo<T> {\n"
+ " auto bar = baz();\n"
+ " return bar + T{};\n"
+ "}");
+ NumberOfBaseTokens = 26u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+
+ BaseTokens = annotate("template<typename T>\n"
+ "Bar(T) -> Bar<typename T::I>;");
+ ConstrainedTokens = annotate("template<typename T>\n"
+ " requires requires(T &&t) {\n"
+ " typename T::I;\n"
+ " }\n"
+ "Bar(T) -> Bar<typename T::I>;");
+ NumberOfBaseTokens = 19u;
+ NumberOfAdditionalRequiresClauseTokens = 14u;
+ NumberOfTokensBeforeRequires = 5u;
+
+ ASSERT_EQ(BaseTokens.size(), NumberOfBaseTokens) << BaseTokens;
+ ASSERT_EQ(ConstrainedTokens.size(),
+ NumberOfBaseTokens + NumberOfAdditionalRequiresClauseTokens)
+ << ConstrainedTokens;
+
+ for (auto I = 0u; I < NumberOfBaseTokens; ++I)
+ if (I < NumberOfTokensBeforeRequires)
+ EXPECT_EQ(*BaseTokens[I], *ConstrainedTokens[I]) << I;
+ else
+ EXPECT_EQ(*BaseTokens[I],
+ *ConstrainedTokens[I + NumberOfAdditionalRequiresClauseTokens])
+ << I;
+}
+
} // namespace
} // namespace format
} // namespace clang
diff --git a/compiler-rt/cmake/Modules/AddCompilerRT.cmake b/compiler-rt/cmake/Modules/AddCompilerRT.cmake
index b69833cbcc09..4a496fc18f65 100644
--- a/compiler-rt/cmake/Modules/AddCompilerRT.cmake
+++ b/compiler-rt/cmake/Modules/AddCompilerRT.cmake
@@ -638,6 +638,7 @@ macro(add_custom_libcxx name prefix)
-DLIBCXXABI_ENABLE_SHARED=OFF
-DLIBCXXABI_HERMETIC_STATIC_LIBRARY=ON
-DLIBCXXABI_INCLUDE_TESTS=OFF
+ -DLIBCXX_CXX_ABI=libcxxabi
-DLIBCXX_ENABLE_EXPERIMENTAL_LIBRARY=OFF
-DLIBCXX_ENABLE_SHARED=OFF
-DLIBCXX_HERMETIC_STATIC_LIBRARY=ON
diff --git a/compiler-rt/lib/asan/asan_linux.cpp b/compiler-rt/lib/asan/asan_linux.cpp
index 1d92c530bd11..42de45b63271 100644
--- a/compiler-rt/lib/asan/asan_linux.cpp
+++ b/compiler-rt/lib/asan/asan_linux.cpp
@@ -131,30 +131,21 @@ static int FindFirstDSOCallback(struct dl_phdr_info *info, size_t size,
VReport(2, "info->dlpi_name = %s\tinfo->dlpi_addr = %p\n", info->dlpi_name,
(void *)info->dlpi_addr);
- // Continue until the first dynamic library is found
- if (!info->dlpi_name || info->dlpi_name[0] == 0)
- return 0;
-
- // Ignore vDSO
- if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
- return 0;
+ const char **name = (const char **)data;
-#if SANITIZER_FREEBSD || SANITIZER_NETBSD
// Ignore first entry (the main program)
- char **p = (char **)data;
- if (!(*p)) {
- *p = (char *)-1;
+ if (!*name) {
+ *name = "";
return 0;
}
-#endif
-#if SANITIZER_SOLARIS
- // Ignore executable on Solaris
- if (info->dlpi_addr == 0)
+# if SANITIZER_LINUX
+ // Ignore vDSO
+ if (internal_strncmp(info->dlpi_name, "linux-", sizeof("linux-") - 1) == 0)
return 0;
-#endif
+# endif
- *(const char **)data = info->dlpi_name;
+ *name = info->dlpi_name;
return 1;
}
@@ -175,7 +166,7 @@ void AsanCheckDynamicRTPrereqs() {
// Ensure that dynamic RT is the first DSO in the list
const char *first_dso_name = nullptr;
dl_iterate_phdr(FindFirstDSOCallback, &first_dso_name);
- if (first_dso_name && !IsDynamicRTName(first_dso_name)) {
+ if (first_dso_name && first_dso_name[0] && !IsDynamicRTName(first_dso_name)) {
Report("ASan runtime does not come first in initial library list; "
"you should either link runtime to your application or "
"manually preload it with LD_PRELOAD.\n");
diff --git a/compiler-rt/lib/asan/asan_poisoning.cpp b/compiler-rt/lib/asan/asan_poisoning.cpp
index bbc7db4709e1..3b7c9d1312d6 100644
--- a/compiler-rt/lib/asan/asan_poisoning.cpp
+++ b/compiler-rt/lib/asan/asan_poisoning.cpp
@@ -12,11 +12,13 @@
//===----------------------------------------------------------------------===//
#include "asan_poisoning.h"
+
#include "asan_report.h"
#include "asan_stack.h"
#include "sanitizer_common/sanitizer_atomic.h"
-#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
+#include "sanitizer_common/sanitizer_libc.h"
namespace __asan {
diff --git a/compiler-rt/lib/asan/asan_report.cpp b/compiler-rt/lib/asan/asan_report.cpp
index 2a38fabaf220..2a55d6c0978d 100644
--- a/compiler-rt/lib/asan/asan_report.cpp
+++ b/compiler-rt/lib/asan/asan_report.cpp
@@ -11,17 +11,19 @@
// This file contains error reporting code.
//===----------------------------------------------------------------------===//
+#include "asan_report.h"
+
+#include "asan_descriptions.h"
#include "asan_errors.h"
#include "asan_flags.h"
-#include "asan_descriptions.h"
#include "asan_internal.h"
#include "asan_mapping.h"
-#include "asan_report.h"
#include "asan_scariness_score.h"
#include "asan_stack.h"
#include "asan_thread.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_report_decorator.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
diff --git a/compiler-rt/lib/asan/asan_rtl.cpp b/compiler-rt/lib/asan/asan_rtl.cpp
index f0bbbf32e6a6..18df9f7e5494 100644
--- a/compiler-rt/lib/asan/asan_rtl.cpp
+++ b/compiler-rt/lib/asan/asan_rtl.cpp
@@ -27,6 +27,7 @@
#include "lsan/lsan_common.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
#include "ubsan/ubsan_init.h"
@@ -44,7 +45,9 @@ static void AsanDie() {
static atomic_uint32_t num_calls;
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
// Don't die twice - run a busy loop.
- while (1) { }
+ while (1) {
+ internal_sched_yield();
+ }
}
if (common_flags()->print_module_map >= 1)
DumpProcessMap();
diff --git a/compiler-rt/lib/hwasan/hwasan.cpp b/compiler-rt/lib/hwasan/hwasan.cpp
index 6f0ea64472c6..f8725a173432 100644
--- a/compiler-rt/lib/hwasan/hwasan.cpp
+++ b/compiler-rt/lib/hwasan/hwasan.cpp
@@ -25,6 +25,7 @@
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
diff --git a/compiler-rt/lib/lsan/lsan.cpp b/compiler-rt/lib/lsan/lsan.cpp
index b6adc248157b..ed47acc37232 100644
--- a/compiler-rt/lib/lsan/lsan.cpp
+++ b/compiler-rt/lib/lsan/lsan.cpp
@@ -13,11 +13,12 @@
#include "lsan.h"
-#include "sanitizer_common/sanitizer_flags.h"
-#include "sanitizer_common/sanitizer_flag_parser.h"
#include "lsan_allocator.h"
#include "lsan_common.h"
#include "lsan_thread.h"
+#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
bool lsan_inited;
bool lsan_init_is_running;
diff --git a/compiler-rt/lib/memprof/memprof_rtl.cpp b/compiler-rt/lib/memprof/memprof_rtl.cpp
index c3d1c5f096fb..21424fb4f072 100644
--- a/compiler-rt/lib/memprof/memprof_rtl.cpp
+++ b/compiler-rt/lib/memprof/memprof_rtl.cpp
@@ -21,6 +21,7 @@
#include "memprof_thread.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
@@ -38,6 +39,7 @@ static void MemprofDie() {
if (atomic_fetch_add(&num_calls, 1, memory_order_relaxed) != 0) {
// Don't die twice - run a busy loop.
while (1) {
+ internal_sched_yield();
}
}
if (common_flags()->print_module_map >= 1)
diff --git a/compiler-rt/lib/msan/msan.cpp b/compiler-rt/lib/msan/msan.cpp
index c554a830e755..77ae20368c84 100644
--- a/compiler-rt/lib/msan/msan.cpp
+++ b/compiler-rt/lib/msan/msan.cpp
@@ -12,20 +12,22 @@
//===----------------------------------------------------------------------===//
#include "msan.h"
+
#include "msan_chained_origin_depot.h"
#include "msan_origin.h"
+#include "msan_poisoning.h"
#include "msan_report.h"
#include "msan_thread.h"
-#include "msan_poisoning.h"
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
-#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_flag_parser.h"
+#include "sanitizer_common/sanitizer_flags.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_procmaps.h"
+#include "sanitizer_common/sanitizer_stackdepot.h"
#include "sanitizer_common/sanitizer_stacktrace.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
-#include "sanitizer_common/sanitizer_stackdepot.h"
#include "ubsan/ubsan_flags.h"
#include "ubsan/ubsan_init.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
index e9379b7bdc96..68bb82aabab1 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.cpp
@@ -11,10 +11,12 @@
//===----------------------------------------------------------------------===//
#include "sanitizer_common.h"
+
#include "sanitizer_allocator_interface.h"
#include "sanitizer_allocator_internal.h"
#include "sanitizer_atomic.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_libc.h"
#include "sanitizer_placement_new.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common.h b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
index 7b591ca52892..2e132066f97e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common.h
@@ -16,7 +16,6 @@
#define SANITIZER_COMMON_H
#include "sanitizer_flags.h"
-#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_list.h"
@@ -286,7 +285,7 @@ void SetStackSizeLimitInBytes(uptr limit);
bool AddressSpaceIsUnlimited();
void SetAddressSpaceUnlimited();
void AdjustStackSize(void *attr);
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args);
+void PlatformPrepareForSandboxing(void *args);
void SetSandboxingCallback(void (*f)());
void InitializeCoverage(bool enabled, const char *coverage_dir);
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
index c4cc0e45193e..6cfce8a8f233 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_common_libcdep.cpp
@@ -14,6 +14,7 @@
#include "sanitizer_allocator_interface.h"
#include "sanitizer_common.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_procmaps.h"
#include "sanitizer_stackdepot.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
index 1d0dbe592b93..35c325359148 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_fuchsia.cpp
@@ -33,6 +33,7 @@
#include "sanitizer_atomic.h"
#include "sanitizer_common.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_symbolizer_fuchsia.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
index 13e77819a339..6cf7cfb5722e 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_coverage_libcdep_new.cpp
@@ -10,11 +10,12 @@
#include "sanitizer_platform.h"
#if !SANITIZER_FUCHSIA
-#include "sancov_flags.h"
-#include "sanitizer_allocator_internal.h"
-#include "sanitizer_atomic.h"
-#include "sanitizer_common.h"
-#include "sanitizer_file.h"
+# include "sancov_flags.h"
+# include "sanitizer_allocator_internal.h"
+# include "sanitizer_atomic.h"
+# include "sanitizer_common.h"
+# include "sanitizer_file.h"
+# include "sanitizer_interface_internal.h"
using namespace __sanitizer;
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
index 5492560df914..c3e08f58c2ce 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.cpp
@@ -19,6 +19,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
+# include "sanitizer_interface_internal.h"
namespace __sanitizer {
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_file.h b/compiler-rt/lib/sanitizer_common/sanitizer_file.h
index 3d7916171c1e..2f98d8281b4c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_file.h
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_file.h
@@ -15,7 +15,6 @@
#ifndef SANITIZER_FILE_H
#define SANITIZER_FILE_H
-#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_mutex.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
index 66a0fd64a05a..398d3b8f154c 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_fuchsia.cpp
@@ -14,17 +14,18 @@
#include "sanitizer_fuchsia.h"
#if SANITIZER_FUCHSIA
-#include <pthread.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <zircon/errors.h>
-#include <zircon/process.h>
-#include <zircon/syscalls.h>
-#include <zircon/utc.h>
-
-#include "sanitizer_common.h"
-#include "sanitizer_libc.h"
-#include "sanitizer_mutex.h"
+# include <pthread.h>
+# include <stdlib.h>
+# include <unistd.h>
+# include <zircon/errors.h>
+# include <zircon/process.h>
+# include <zircon/syscalls.h>
+# include <zircon/utc.h>
+
+# include "sanitizer_common.h"
+# include "sanitizer_interface_internal.h"
+# include "sanitizer_libc.h"
+# include "sanitizer_mutex.h"
namespace __sanitizer {
@@ -89,7 +90,7 @@ void InitializePlatformEarly() {}
void MaybeReexec() {}
void CheckASLR() {}
void CheckMPROTECT() {}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
void DisableCoreDumperIfNecessary() {}
void InstallDeadlySignalHandlers(SignalHandlerType handler) {}
void SetAlternateSignalStack() {}
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
index 3b20e1c5e2af..294464ab2ed7 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_mac.cpp
@@ -25,6 +25,7 @@
#include "sanitizer_common.h"
#include "sanitizer_file.h"
#include "sanitizer_flags.h"
+#include "sanitizer_interface_internal.h"
#include "sanitizer_internal_defs.h"
#include "sanitizer_libc.h"
#include "sanitizer_platform_limits_posix.h"
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
index eed02ce4f6aa..b6d8c7281bd4 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_posix_libcdep.cpp
@@ -290,7 +290,7 @@ bool IsAccessibleMemoryRange(uptr beg, uptr size) {
return result;
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {
+void PlatformPrepareForSandboxing(void *args) {
// Some kinds of sandboxes may forbid filesystem access, so we won't be able
// to read the file mappings from /proc/self/maps. Luckily, neither the
// process will be able to load additional libraries, so it's fine to use the
diff --git a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
index 7c84cdc22ce4..d59072669715 100644
--- a/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
+++ b/compiler-rt/lib/sanitizer_common/sanitizer_win.cpp
@@ -517,7 +517,7 @@ void ReExec() {
UNIMPLEMENTED();
}
-void PlatformPrepareForSandboxing(__sanitizer_sandbox_arguments *args) {}
+void PlatformPrepareForSandboxing(void *args) {}
bool StackSizeIsUnlimited() {
UNIMPLEMENTED();
diff --git a/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp b/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp
index 8e7be955a196..579fd9904df4 100644
--- a/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp
+++ b/compiler-rt/lib/sanitizer_common/tests/sanitizer_common_test.cpp
@@ -11,17 +11,25 @@
//===----------------------------------------------------------------------===//
#include <algorithm>
+// This ensures that including both internal sanitizer_common headers
+// and the interface headers does not lead to compilation failures.
+// Both may be included in unit tests, where googletest transitively
+// pulls in sanitizer interface headers.
+// The headers are deliberately included via relative paths,
+// because the compiler might otherwise pick up a different,
+// mismatched version of the sanitizer headers.
+#include "../../../include/sanitizer/asan_interface.h"
+#include "../../../include/sanitizer/msan_interface.h"
+#include "../../../include/sanitizer/tsan_interface.h"
+#include "gtest/gtest.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
#include "sanitizer_common/sanitizer_flags.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_platform.h"
-
#include "sanitizer_pthread_wrappers.h"
-#include "gtest/gtest.h"
-
namespace __sanitizer {
static bool IsSorted(const uptr *array, uptr n) {
diff --git a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
index 999be79c6418..ea99c3843075 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_interceptors_posix.cpp
@@ -1554,16 +1554,16 @@ TSAN_INTERCEPTOR(int, __fxstat, int version, int fd, void *buf) {
#endif
TSAN_INTERCEPTOR(int, fstat, int fd, void *buf) {
-#if SANITIZER_FREEBSD || SANITIZER_MAC || SANITIZER_ANDROID || SANITIZER_NETBSD
- SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
+#if SANITIZER_GLIBC
+ SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(fstat)(fd, buf);
+ return REAL(__fxstat)(0, fd, buf);
#else
- SCOPED_TSAN_INTERCEPTOR(__fxstat, 0, fd, buf);
+ SCOPED_TSAN_INTERCEPTOR(fstat, fd, buf);
if (fd > 0)
FdAccess(thr, pc, fd);
- return REAL(__fxstat)(0, fd, buf);
+ return REAL(fstat)(fd, buf);
#endif
}
diff --git a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
index c068d8e486b0..45fa6d6f00c0 100644
--- a/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
+++ b/compiler-rt/lib/tsan/rtl/tsan_rtl.cpp
@@ -16,6 +16,7 @@
#include "sanitizer_common/sanitizer_atomic.h"
#include "sanitizer_common/sanitizer_common.h"
#include "sanitizer_common/sanitizer_file.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_placement_new.h"
#include "sanitizer_common/sanitizer_stackdepot.h"
diff --git a/compiler-rt/lib/ubsan/ubsan_init.cpp b/compiler-rt/lib/ubsan/ubsan_init.cpp
index 9931d85bf40c..5802d58896f0 100644
--- a/compiler-rt/lib/ubsan/ubsan_init.cpp
+++ b/compiler-rt/lib/ubsan/ubsan_init.cpp
@@ -12,13 +12,14 @@
#include "ubsan_platform.h"
#if CAN_SANITIZE_UB
-#include "ubsan_diag.h"
-#include "ubsan_init.h"
-#include "ubsan_flags.h"
#include "sanitizer_common/sanitizer_common.h"
+#include "sanitizer_common/sanitizer_interface_internal.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "sanitizer_common/sanitizer_mutex.h"
#include "sanitizer_common/sanitizer_symbolizer.h"
+#include "ubsan_diag.h"
+#include "ubsan_flags.h"
+#include "ubsan_init.h"
using namespace __ubsan;
diff --git a/flang/include/flang/Optimizer/Dialect/FIRType.h b/flang/include/flang/Optimizer/Dialect/FIRType.h
index f1f8910e7090..516d8e56189c 100644
--- a/flang/include/flang/Optimizer/Dialect/FIRType.h
+++ b/flang/include/flang/Optimizer/Dialect/FIRType.h
@@ -184,6 +184,13 @@ inline bool singleIndirectionLevel(mlir::Type ty) {
}
#endif
+/// Return true iff `ty` is a RecordType with type parameters.
+inline bool isRecordWithTypeParameters(mlir::Type ty) {
+ if (auto recTy = ty.dyn_cast_or_null<fir::RecordType>())
+ return recTy.getNumLenParams() != 0;
+ return false;
+}
+
/// Apply the components specified by `path` to `rootTy` to determine the type
/// of the resulting component element. `rootTy` should be an aggregate type.
/// Returns null on error.
diff --git a/flang/lib/Evaluate/check-expression.cpp b/flang/lib/Evaluate/check-expression.cpp
index 1bb33b62151e..64b118a54aa7 100644
--- a/flang/lib/Evaluate/check-expression.cpp
+++ b/flang/lib/Evaluate/check-expression.cpp
@@ -526,18 +526,14 @@ public:
} else {
return "dummy procedure argument";
}
+ } else if (&symbol.owner() != &scope_ || &ultimate.owner() != &scope_) {
+ return std::nullopt; // host association is in play
} else if (const auto *object{
ultimate.detailsIf<semantics::ObjectEntityDetails>()}) {
if (object->commonBlock()) {
return std::nullopt;
}
}
- for (const semantics::Scope *s{&scope_}; !s->IsGlobal();) {
- s = &s->parent();
- if (s == &ultimate.owner()) {
- return std::nullopt;
- }
- }
return "reference to local entity '"s + ultimate.name().ToString() + "'";
}
diff --git a/flang/lib/Evaluate/tools.cpp b/flang/lib/Evaluate/tools.cpp
index 726d5308eedf..00b9769befe8 100644
--- a/flang/lib/Evaluate/tools.cpp
+++ b/flang/lib/Evaluate/tools.cpp
@@ -971,11 +971,18 @@ std::optional<parser::MessageFixedText> CheckProcCompatibility(bool isCall,
} else if (lhsProcedure->HasExplicitInterface() &&
!rhsProcedure->HasExplicitInterface()) {
// Section 10.2.2.4, paragraph 3 prohibits associating a procedure pointer
- // with an explicit interface with a procedure with an implicit interface
- msg = "Procedure %s with explicit interface may not be associated with"
- " procedure designator '%s' with implicit interface"_err_en_US;
+ // with an explicit interface with a procedure whose characteristics don't
+ // match. That's the case if the target procedure has an implicit
+ // interface. But this case is allowed by several other compilers as long
+ // as the explicit interface can be called via an implicit interface.
+ if (!lhsProcedure->CanBeCalledViaImplicitInterface()) {
+ msg = "Procedure %s with explicit interface that cannot be called via "
+ "an implicit interface cannot be associated with procedure "
+ "designator with an implicit interface"_err_en_US;
+ }
} else if (!lhsProcedure->HasExplicitInterface() &&
rhsProcedure->HasExplicitInterface()) {
+ // OK if the target can be called via an implicit interface
if (!rhsProcedure->CanBeCalledViaImplicitInterface()) {
msg = "Procedure %s with implicit interface may not be associated "
"with procedure designator '%s' with explicit interface that "
diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
index d27add522a89..98d11438959d 100644
--- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -927,18 +927,27 @@ struct AllocMemOpConversion : public FIROpConversion<fir::AllocMemOp> {
mlir::LogicalResult
matchAndRewrite(fir::AllocMemOp heap, OpAdaptor adaptor,
mlir::ConversionPatternRewriter &rewriter) const override {
- mlir::Type ty = convertType(heap.getType());
+ auto heapTy = heap.getType();
+ auto ty = convertType(heapTy);
mlir::LLVM::LLVMFuncOp mallocFunc = getMalloc(heap, rewriter);
mlir::Location loc = heap.getLoc();
auto ity = lowerTy().indexType();
- if (auto recTy = fir::unwrapSequenceType(heap.getAllocatedType())
- .dyn_cast<fir::RecordType>())
- if (recTy.getNumLenParams() != 0) {
- TODO(loc,
- "fir.allocmem codegen of derived type with length parameters");
- return failure();
- }
+ auto dataTy = fir::unwrapRefType(heapTy);
+ if (fir::isRecordWithTypeParameters(fir::unwrapSequenceType(dataTy)))
+ TODO(loc, "fir.allocmem codegen of derived type with length parameters");
mlir::Value size = genTypeSizeInBytes(loc, ity, rewriter, ty);
+ // For !fir.array<NxMx!fir.char<K,?>>, `size` at this point is only the width
+ // of !fir.char<K>, so multiply in the constant dimensions here.
+ if (fir::hasDynamicSize(dataTy))
+ if (auto seqTy = dataTy.dyn_cast<fir::SequenceType>())
+ if (fir::characterWithDynamicLen(seqTy.getEleTy())) {
+ fir::SequenceType::Extent arrSize = 1;
+ for (auto d : seqTy.getShape())
+ if (d != fir::SequenceType::getUnknownExtent())
+ arrSize *= d;
+ size = rewriter.create<mlir::LLVM::MulOp>(
+ loc, ity, size, genConstantIndex(loc, ity, rewriter, arrSize));
+ }
for (mlir::Value opnd : adaptor.getOperands())
size = rewriter.create<mlir::LLVM::MulOp>(
loc, ity, size, integerCast(loc, rewriter, ity, opnd));
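
To make the size computation concrete (illustration only, mirroring the new flang/test/Fir/alloc.fir case below): for fir.allocmem !fir.array<3x3x!fir.char<1,?>>(%len), genTypeSizeInBytes yields 1 byte (the width of !fir.char<1>), the constant extents contribute 3 * 3 = 9, and the operand loop multiplies in the dynamic length %len, so the generated call is effectively malloc(1 * 9 * %len). That is exactly what the `mul i64 9, %[[arg]]` CHECK line in the allocmem_array_of_dynchar test verifies.
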
diff --git a/flang/lib/Semantics/check-declarations.cpp b/flang/lib/Semantics/check-declarations.cpp
index dcb8cd383403..fdbbcaba55ae 100644
--- a/flang/lib/Semantics/check-declarations.cpp
+++ b/flang/lib/Semantics/check-declarations.cpp
@@ -34,7 +34,6 @@ using characteristics::Procedure;
class CheckHelper {
public:
explicit CheckHelper(SemanticsContext &c) : context_{c} {}
- CheckHelper(SemanticsContext &c, const Scope &s) : context_{c}, scope_{&s} {}
SemanticsContext &context() { return context_; }
void Check() { Check(context_.globalScope()); }
diff --git a/flang/lib/Semantics/program-tree.cpp b/flang/lib/Semantics/program-tree.cpp
index e20299b2fb4c..9d76cfad8380 100644
--- a/flang/lib/Semantics/program-tree.cpp
+++ b/flang/lib/Semantics/program-tree.cpp
@@ -44,6 +44,37 @@ static void GetEntryStmts(
}
}
+// Collects generics that define simple names that could include
+// identically-named subprograms as specific procedures.
+static void GetGenerics(
+ ProgramTree &node, const parser::SpecificationPart &spec) {
+ for (const auto &decl :
+ std::get<std::list<parser::DeclarationConstruct>>(spec.t)) {
+ if (const auto *spec{
+ std::get_if<parser::SpecificationConstruct>(&decl.u)}) {
+ if (const auto *generic{std::get_if<
+ parser::Statement<common::Indirection<parser::GenericStmt>>>(
+ &spec->u)}) {
+ const parser::GenericStmt &genericStmt{generic->statement.value()};
+ const auto &genericSpec{std::get<parser::GenericSpec>(genericStmt.t)};
+ node.AddGeneric(genericSpec);
+ } else if (const auto *interface{
+ std::get_if<common::Indirection<parser::InterfaceBlock>>(
+ &spec->u)}) {
+ const parser::InterfaceBlock &interfaceBlock{interface->value()};
+ const parser::InterfaceStmt &interfaceStmt{
+ std::get<parser::Statement<parser::InterfaceStmt>>(interfaceBlock.t)
+ .statement};
+ const auto *genericSpec{
+ std::get_if<std::optional<parser::GenericSpec>>(&interfaceStmt.u)};
+ if (genericSpec && genericSpec->has_value()) {
+ node.AddGeneric(**genericSpec);
+ }
+ }
+ }
+ }
+}
+
template <typename T>
static ProgramTree BuildSubprogramTree(const parser::Name &name, const T &x) {
const auto &spec{std::get<parser::SpecificationPart>(x.t)};
@@ -53,6 +84,7 @@ static ProgramTree BuildSubprogramTree(const parser::Name &name, const T &x) {
ProgramTree node{name, spec, &exec};
GetEntryStmts(node, spec);
GetEntryStmts(node, exec);
+ GetGenerics(node, spec);
if (subps) {
for (const auto &subp :
std::get<std::list<parser::InternalSubprogram>>(subps->t)) {
@@ -75,6 +107,7 @@ static ProgramTree BuildModuleTree(const parser::Name &name, const T &x) {
const auto &spec{std::get<parser::SpecificationPart>(x.t)};
const auto &subps{std::get<std::optional<parser::ModuleSubprogramPart>>(x.t)};
ProgramTree node{name, spec};
+ GetGenerics(node, spec);
if (subps) {
for (const auto &subp :
std::get<std::list<parser::ModuleSubprogram>>(subps->t)) {
@@ -230,4 +263,8 @@ void ProgramTree::AddEntry(const parser::EntryStmt &entryStmt) {
entryStmts_.emplace_back(entryStmt);
}
+void ProgramTree::AddGeneric(const parser::GenericSpec &generic) {
+ genericSpecs_.emplace_back(generic);
+}
+
} // namespace Fortran::semantics
diff --git a/flang/lib/Semantics/program-tree.h b/flang/lib/Semantics/program-tree.h
index 798abd7ea8d6..dffea5917c1e 100644
--- a/flang/lib/Semantics/program-tree.h
+++ b/flang/lib/Semantics/program-tree.h
@@ -30,6 +30,8 @@ class Scope;
class ProgramTree {
public:
using EntryStmtList = std::list<common::Reference<const parser::EntryStmt>>;
+ using GenericSpecList =
+ std::list<common::Reference<const parser::GenericSpec>>;
// Build the ProgramTree rooted at one of these program units.
static ProgramTree Build(const parser::ProgramUnit &);
@@ -71,10 +73,9 @@ public:
const parser::ExecutionPart *exec() const { return exec_; }
std::list<ProgramTree> &children() { return children_; }
const std::list<ProgramTree> &children() const { return children_; }
- const std::list<common::Reference<const parser::EntryStmt>> &
- entryStmts() const {
- return entryStmts_;
- }
+ const EntryStmtList &entryStmts() const { return entryStmts_; }
+ const GenericSpecList &genericSpecs() const { return genericSpecs_; }
+
Symbol::Flag GetSubpFlag() const;
bool IsModule() const; // Module or Submodule
bool HasModulePrefix() const; // in function or subroutine stmt
@@ -82,6 +83,7 @@ public:
void set_scope(Scope &);
void AddChild(ProgramTree &&);
void AddEntry(const parser::EntryStmt &);
+ void AddGeneric(const parser::GenericSpec &);
template <typename T>
ProgramTree &set_stmt(const parser::Statement<T> &stmt) {
@@ -102,6 +104,7 @@ private:
const parser::ExecutionPart *exec_{nullptr};
std::list<ProgramTree> children_;
EntryStmtList entryStmts_;
+ GenericSpecList genericSpecs_;
Scope *scope_{nullptr};
const parser::CharBlock *endStmt_{nullptr};
bool isSpecificationPartResolved_{false};
diff --git a/flang/lib/Semantics/resolve-names.cpp b/flang/lib/Semantics/resolve-names.cpp
index 11d9acad21ea..2c3925469532 100644
--- a/flang/lib/Semantics/resolve-names.cpp
+++ b/flang/lib/Semantics/resolve-names.cpp
@@ -7054,6 +7054,18 @@ void ResolveNamesVisitor::AddSubpNames(ProgramTree &node) {
symbol.set(child.GetSubpFlag());
}
}
+ for (const auto &generic : node.genericSpecs()) {
+ if (const auto *name{std::get_if<parser::Name>(&generic->u)}) {
+ if (currScope().find(name->source) != currScope().end()) {
+ // If this scope has both a generic interface and a contained
+ // subprogram with the same name, create the generic's symbol
+ // now so that any other generics of the same name that are pulled
+ // into scope later via USE association will properly merge instead
+ // of raising a bogus error due to a conflict with the subprogram.
+ CreateGeneric(*generic);
+ }
+ }
+ }
}
// Push a new scope for this node or return false on error.
diff --git a/flang/runtime/transformational.cpp b/flang/runtime/transformational.cpp
index 79d1373f79a5..46cf2ec9b732 100644
--- a/flang/runtime/transformational.cpp
+++ b/flang/runtime/transformational.cpp
@@ -377,7 +377,9 @@ void RTNAME(Reshape)(Descriptor &result, const Descriptor &source,
for (SubscriptValue j{0}; j < resultRank; ++j, ++shapeSubscript) {
resultExtent[j] = GetInt64(
shape.Element<char>(&shapeSubscript), shapeElementBytes, terminator);
- RUNTIME_CHECK(terminator, resultExtent[j] >= 0);
+ if (resultExtent[j] < 0)
+ terminator.Crash(
+ "RESHAPE: bad value for SHAPE(%d)=%d", j + 1, resultExtent[j]);
resultElements *= resultExtent[j];
}
@@ -387,7 +389,9 @@ void RTNAME(Reshape)(Descriptor &result, const Descriptor &source,
std::size_t sourceElements{source.Elements()};
std::size_t padElements{pad ? pad->Elements() : 0};
if (resultElements > sourceElements) {
- RUNTIME_CHECK(terminator, padElements > 0);
+ if (padElements <= 0)
+ terminator.Crash("RESHAPE: not eough elements, need %d but only have %d",
+ resultElements, sourceElements);
RUNTIME_CHECK(terminator, pad->ElementBytes() == elementBytes);
}
@@ -397,15 +401,18 @@ void RTNAME(Reshape)(Descriptor &result, const Descriptor &source,
if (order) {
RUNTIME_CHECK(terminator, order->rank() == 1);
RUNTIME_CHECK(terminator, order->type().IsInteger());
- RUNTIME_CHECK(terminator, order->GetDimension(0).Extent() == resultRank);
+ if (order->GetDimension(0).Extent() != resultRank)
+ terminator.Crash("RESHAPE: the extent of ORDER (%d) must match the rank"
+ " of the SHAPE (%d)",
+ order->GetDimension(0).Extent(), resultRank);
std::uint64_t values{0};
SubscriptValue orderSubscript{order->GetDimension(0).LowerBound()};
std::size_t orderElementBytes{order->ElementBytes()};
for (SubscriptValue j{0}; j < resultRank; ++j, ++orderSubscript) {
auto k{GetInt64(order->Element<char>(&orderSubscript), orderElementBytes,
terminator)};
- RUNTIME_CHECK(
- terminator, k >= 1 && k <= resultRank && !((values >> k) & 1));
+ if (k < 1 || k > resultRank || ((values >> k) & 1))
+ terminator.Crash("RESHAPE: bad value for ORDER element (%d)", k);
values |= std::uint64_t{1} << k;
dimOrder[j] = k - 1;
}
diff --git a/flang/test/Fir/alloc.fir b/flang/test/Fir/alloc.fir
new file mode 100644
index 000000000000..e18a888e803d
--- /dev/null
+++ b/flang/test/Fir/alloc.fir
@@ -0,0 +1,83 @@
+// RUN: tco %s | FileCheck %s
+
+// UNSUPPORTED: system-windows
+
+// CHECK-LABEL: define i32* @f1()
+func @f1() -> !fir.ref<i32> {
+ // CHECK: alloca i32, i64 1
+ %1 = fir.alloca i32
+ return %1 : !fir.ref<i32>
+}
+
+// CHECK-LABEL: define i32* @f2()
+func @f2() -> !fir.ref<i32> {
+ %0 = arith.constant 100 : index
+ // CHECK: alloca i32, i64 100
+ %1 = fir.alloca i32, %0
+ return %1 : !fir.ref<i32>
+}
+
+// CHECK-LABEL: define i32* @f3()
+func @f3() -> !fir.heap<i32> {
+ // CHECK: call i8* @malloc(i64 4)
+ %1 = fir.allocmem i32
+ return %1 : !fir.heap<i32>
+}
+
+// CHECK-LABEL: define i32* @f4()
+func @f4() -> !fir.heap<i32> {
+ %0 = arith.constant 100 : index
+ // CHECK: call i8* @malloc(i64 400)
+ %1 = fir.allocmem i32, %0
+ return %1 : !fir.heap<i32>
+}
+
+// CHECK-LABEL: define i32** @f5()
+func @f5() -> !fir.ref<!fir.ptr<!fir.array<?xi32>>> {
+ // CHECK: alloca i32*, i64 1
+ %1 = fir.alloca !fir.ptr<!fir.array<?xi32>>
+ return %1 : !fir.ref<!fir.ptr<!fir.array<?xi32>>>
+}
+
+// CHECK-LABEL: define i8* @char_array_alloca(
+// CHECK-SAME: i32 %[[l:.*]], i64 %[[e:.*]])
+func @char_array_alloca(%l: i32, %e : index) -> !fir.ref<!fir.array<?x?x!fir.char<1,?>>> {
+ // CHECK: %[[lcast:.*]] = sext i32 %[[l]] to i64
+ // CHECK: %[[prod:.*]] = mul i64 %[[lcast]], %[[e]]
+ // CHECK: %[[size:.*]] = mul i64 %[[prod]], %[[e]]
+ // CHECK: alloca i8, i64 %[[size]]
+ %a = fir.alloca !fir.array<?x?x!fir.char<1,?>>(%l : i32), %e, %e
+ return %a : !fir.ref<!fir.array<?x?x!fir.char<1,?>>>
+}
+
+// Constant factor of 60 (4*3*5) must be included.
+// CHECK-LABEL: define i32* @array_with_holes(
+// CHECK-SAME: i64 %[[a:.*]], i64 %[[b:.*]])
+func @array_with_holes(%0 : index, %1 : index) -> !fir.ref<!fir.array<4x?x3x?x5xi32>> {
+ // CHECK: %[[prod1:.*]] = mul i64 60, %[[a]]
+ // CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[b]]
+ // CHECK: alloca i32, i64 %[[prod2]]
+ %a = fir.alloca !fir.array<4x?x3x?x5xi32>, %0, %1
+ return %a : !fir.ref<!fir.array<4x?x3x?x5xi32>>
+}
+
+// CHECK-LABEL: define void @allocmem_array_of_dynchar(
+// CHECK-SAME: i64 %[[arg:.*]])
+// CHECK: %[[mul:.*]] = mul i64 9, %[[arg]]
+// CHECK: %[[malloc:.*]] = call i8* @malloc(i64 %[[mul]])
+// CHECK: ret void
+func @allocmem_array_of_dynchar(%arg0: index) {
+ %1 = fir.allocmem !fir.array<3x3x!fir.char<1,?>>(%arg0 : index)
+ return
+}
+
+// CHECK-LABEL: define void @allocmem_dynarray_of_dynchar(
+// CHECK-SAME: i64 %[[len:.*]], i64 %[[extent:.*]])
+// CHECK: %[[a:.*]] = mul i64 24, %[[len]]
+// CHECK: %[[b:.*]] = mul i64 %[[a]], %[[extent]]
+// CHECK: %[[malloc:.*]] = call i8* @malloc(i64 %[[b]])
+// CHECK: ret void
+func @allocmem_dynarray_of_dynchar(%arg0: index, %arg1: index) {
+ %1 = fir.allocmem !fir.array<3x?x4x!fir.char<2,?>>(%arg0 : index), %arg1
+ return
+}
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir
index ef079b63007a..32b0bd58777f 100644
--- a/flang/test/Fir/convert-to-llvm.fir
+++ b/flang/test/Fir/convert-to-llvm.fir
@@ -230,7 +230,9 @@ func @test_string_with_shape(%len: index, %nelems: index) {
// CHECK-LABEL: llvm.func @test_string_with_shape
// CHECK-SAME: %[[LEN:.*]]: i64, %[[NELEMS:.*]]: i64)
// CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64
-// CHECK: %[[LEN_SIZE:.*]] = llvm.mul %[[ONE]], %[[LEN]] : i64
+// CHECK: %[[ONE2:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[MUL1:.*]] = llvm.mul %[[ONE]], %[[ONE2]] : i64
+// CHECK: %[[LEN_SIZE:.*]] = llvm.mul %[[MUL1]], %[[LEN]] : i64
// CHECK: %[[TOTAL_SIZE:.*]] = llvm.mul %[[LEN_SIZE]], %[[NELEMS]] : i64
// CHECK: %[[MEM:.*]] = llvm.call @malloc(%[[TOTAL_SIZE]])
// CHECK: %[[B1:.*]] = llvm.bitcast %[[MEM]] : !llvm.ptr<i8> to !llvm.ptr<i8>
diff --git a/flang/test/Semantics/assign03.f90 b/flang/test/Semantics/assign03.f90
index ce5897d41dc6..0c385b167c92 100644
--- a/flang/test/Semantics/assign03.f90
+++ b/flang/test/Semantics/assign03.f90
@@ -176,8 +176,7 @@ contains
procedure(s), pointer :: p, q
procedure(), pointer :: r
external :: s_external
- !ERROR: Procedure pointer 'p' with explicit interface may not be associated with procedure designator 's_external' with implicit interface
- p => s_external
+ p => s_external ! OK for a pointer with an explicit interface to be associated with a procedure with an implicit interface
r => s_module ! OK for a pointer with implicit interface to be associated with a procedure with an explicit interface. See 10.2.2.4 (3)
end
diff --git a/flang/test/Semantics/associated.f90 b/flang/test/Semantics/associated.f90
index 3d472bdc0666..34583c477e16 100644
--- a/flang/test/Semantics/associated.f90
+++ b/flang/test/Semantics/associated.f90
@@ -67,6 +67,7 @@ subroutine assoc()
procedure(subrInt) :: subProc
procedure(subrInt), pointer :: subProcPointer
procedure(), pointer :: implicitProcPointer
+ procedure(subrCannotBeCalledfromImplicit), pointer :: cannotBeCalledfromImplicitPointer
logical :: lVar
type(t1) :: t1x
type(t1), target :: t1xtarget
@@ -158,10 +159,8 @@ subroutine assoc()
realProcPointer1 => intProc
!ERROR: Procedure pointer 'realprocpointer1' associated with incompatible procedure designator 'intproc'
lvar = associated(realProcPointer1, intProc)
- !ERROR: Procedure pointer 'subprocpointer' with explicit interface may not be associated with procedure designator 'externalproc' with implicit interface
- subProcPointer => externalProc
- !ERROR: Procedure pointer 'subprocpointer' with explicit interface may not be associated with procedure designator 'externalproc' with implicit interface
- lvar = associated(subProcPointer, externalProc)
+ subProcPointer => externalProc ! OK to associate a procedure pointer with an explicit interface to a procedure with an implicit interface
+ lvar = associated(subProcPointer, externalProc) ! OK to associate a procedure pointer with an explicit interface to a procedure with an implicit interface
!ERROR: Subroutine pointer 'subprocpointer' may not be associated with function designator 'intproc'
subProcPointer => intProc
!ERROR: Subroutine pointer 'subprocpointer' may not be associated with function designator 'intproc'
@@ -174,5 +173,9 @@ subroutine assoc()
lvar = associated(implicitProcPointer, subr) ! OK
!ERROR: Procedure pointer 'implicitprocpointer' with implicit interface may not be associated with procedure designator 'subrcannotbecalledfromimplicit' with explicit interface that cannot be called via an implicit interface
lvar = associated(implicitProcPointer, subrCannotBeCalledFromImplicit)
+ !ERROR: Procedure pointer 'cannotbecalledfromimplicitpointer' with explicit interface that cannot be called via an implicit interface cannot be associated with procedure designator with an implicit interface
+ cannotBeCalledfromImplicitPointer => externalProc
+ !ERROR: Procedure pointer 'cannotbecalledfromimplicitpointer' with explicit interface that cannot be called via an implicit interface cannot be associated with procedure designator with an implicit interface
+ lvar = associated(cannotBeCalledfromImplicitPointer, externalProc)
end subroutine test
end subroutine assoc
diff --git a/flang/test/Semantics/reshape.f90 b/flang/test/Semantics/reshape.f90
index 8113bffc577b..31071332f50f 100644
--- a/flang/test/Semantics/reshape.f90
+++ b/flang/test/Semantics/reshape.f90
@@ -35,6 +35,8 @@ program reshaper
integer, parameter :: array16(1) = RESHAPE([(n,n=1,8)],[1], [0], array15)
integer, parameter, dimension(3,4) :: array17 = 3
integer, parameter, dimension(3,4) :: array18 = RESHAPE(array17, [3,4])
+ integer, parameter, dimension(2,2) :: bad_order = reshape([1, 2, 3, 4], [2,2])
+ real :: array20(2,3)
! Implicit reshape of array of components
type :: dType
integer :: field(2)
@@ -47,4 +49,6 @@ program reshaper
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]))
!ERROR: 'shape=' argument must not have a negative extent
CALL ext_sub(RESHAPE([(n, n=1,20)], [1, -5, 3]))
+ !ERROR: 'order=' argument has unacceptable rank 2
+ array20 = RESHAPE([(n, n = 1, 4)], [2, 3], order = bad_order)
end program reshaper
diff --git a/flang/test/Semantics/resolve18.f90 b/flang/test/Semantics/resolve18.f90
index 1f59794b7e38..16f6ac5ff5f6 100644
--- a/flang/test/Semantics/resolve18.f90
+++ b/flang/test/Semantics/resolve18.f90
@@ -182,3 +182,29 @@ contains
function f13()
end function f13
end module m13
+
+! Not an error
+module m14
+ interface gen1
+ module procedure s
+ end interface
+ generic :: gen2 => s
+ contains
+ subroutine s(x)
+ integer(1) :: x
+ end subroutine s
+end module m14
+module m15
+ use m14
+ interface gen1
+ module procedure gen1
+ end interface
+ generic :: gen2 => gen2
+ contains
+ subroutine gen1(x)
+ integer(2) :: x
+ end subroutine gen1
+ subroutine gen2(x)
+ integer(4) :: x
+ end subroutine gen2
+end module m15
diff --git a/flang/test/Semantics/resolve69.f90 b/flang/test/Semantics/resolve69.f90
index e3f00e2bb27a..ee3e21a6a870 100644
--- a/flang/test/Semantics/resolve69.f90
+++ b/flang/test/Semantics/resolve69.f90
@@ -64,3 +64,15 @@ Program d5
line%value = 'ok'
Print *,Trim(line%value)
End Program
+
+!Not errors.
+subroutine outer
+ integer n
+ contains
+ character(n) function inner1()
+ inner1 = ''
+ end function inner1
+ function inner2()
+ real inner2(n)
+ end function inner2
+end subroutine outer
diff --git a/libc/config/linux/aarch64/entrypoints.txt b/libc/config/linux/aarch64/entrypoints.txt
index 8575ab8f048d..c80e6bee82b7 100644
--- a/libc/config/linux/aarch64/entrypoints.txt
+++ b/libc/config/linux/aarch64/entrypoints.txt
@@ -55,6 +55,10 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.string.strtok
libc.src.string.strtok_r
+ # string.h entrypoints that depend on malloc
+ libc.src.string.strdup
+ libc.src.string.strndup
+
# inttypes.h entrypoints
libc.src.inttypes.imaxdiv
libc.src.inttypes.strtoimax
@@ -81,6 +85,12 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.strtoul
libc.src.stdlib.strtoull
+ # stdlib.h external entrypoints
+ libc.src.stdlib.malloc
+ libc.src.stdlib.calloc
+ libc.src.stdlib.realloc
+ libc.src.stdlib.free
+
# sys/stat.h entrypoints
libc.src.sys.stat.mkdir
libc.src.sys.stat.mkdirat
diff --git a/libc/config/linux/x86_64/entrypoints.txt b/libc/config/linux/x86_64/entrypoints.txt
index 3de1e6ef72ad..f21d032464dc 100644
--- a/libc/config/linux/x86_64/entrypoints.txt
+++ b/libc/config/linux/x86_64/entrypoints.txt
@@ -55,6 +55,10 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.string.strtok
libc.src.string.strtok_r
+ # string.h entrypoints that depend on malloc
+ libc.src.string.strdup
+ libc.src.string.strndup
+
# inttypes.h entrypoints
libc.src.inttypes.imaxdiv
libc.src.inttypes.strtoimax
@@ -81,6 +85,12 @@ set(TARGET_LIBC_ENTRYPOINTS
libc.src.stdlib.strtoul
libc.src.stdlib.strtoull
+ # stdlib.h external entrypoints
+ libc.src.stdlib.malloc
+ libc.src.stdlib.calloc
+ libc.src.stdlib.realloc
+ libc.src.stdlib.free
+
# sys/mman.h entrypoints
libc.src.sys.mman.mmap
libc.src.sys.mman.munmap
@@ -251,25 +261,6 @@ if(LLVM_LIBC_FULL_BUILD)
)
endif()
-if(LLVM_LIBC_INCLUDE_SCUDO)
- list(APPEND TARGET_LIBC_ENTRYPOINTS
-
- # stdlib.h external entrypoints
- libc.src.stdlib.malloc
- libc.src.stdlib.calloc
- libc.src.stdlib.realloc
- libc.src.stdlib.free
- )
-endif()
-
-if(LLVM_LIBC_INCLUDE_SCUDO OR NOT LLVM_LIBC_FULL_BUILD)
- list(APPEND TARGET_LIBC_ENTRYPOINTS
- # string.h entrypoints that depend on malloc
- libc.src.string.strdup
- libc.src.string.strndup
- )
-endif()
-
set(TARGET_LLVMLIBC_ENTRYPOINTS
${TARGET_LIBC_ENTRYPOINTS}
${TARGET_LIBM_ENTRYPOINTS}
diff --git a/libc/src/__support/CPP/CMakeLists.txt b/libc/src/__support/CPP/CMakeLists.txt
index 8a61fc76922f..b591734a6b6f 100644
--- a/libc/src/__support/CPP/CMakeLists.txt
+++ b/libc/src/__support/CPP/CMakeLists.txt
@@ -12,12 +12,10 @@ add_header_library(
TypeTraits.h
)
-if(LLVM_LIBC_INCLUDE_SCUDO OR NOT LLVM_LIBC_FULL_BUILD)
- add_header_library(
- vector
- HDRS
- vector.h
- DEPENDS
- libc.include.stdlib
- )
-endif()
+add_header_library(
+ vector
+ HDRS
+ vector.h
+ DEPENDS
+ libc.include.stdlib
+)
diff --git a/libc/src/stdlib/CMakeLists.txt b/libc/src/stdlib/CMakeLists.txt
index b333def651e6..cb3aeb808949 100644
--- a/libc/src/stdlib/CMakeLists.txt
+++ b/libc/src/stdlib/CMakeLists.txt
@@ -230,7 +230,19 @@ if(LLVM_LIBC_INCLUDE_SCUDO)
DEPENDS
${SCUDO_DEPS}
)
-
+else()
+ add_entrypoint_external(
+ malloc
+ )
+ add_entrypoint_external(
+ calloc
+ )
+ add_entrypoint_external(
+ realloc
+ )
+ add_entrypoint_external(
+ free
+ )
endif()
if(NOT LLVM_LIBC_FULL_BUILD)
diff --git a/libc/test/src/__support/CMakeLists.txt b/libc/test/src/__support/CMakeLists.txt
index 20d0e2d75745..923db635c8a5 100644
--- a/libc/test/src/__support/CMakeLists.txt
+++ b/libc/test/src/__support/CMakeLists.txt
@@ -49,4 +49,5 @@ add_custom_command(TARGET libc_str_to_float_comparison_test
COMMENT "Test the strtof and strtod implementations against precomputed results."
VERBATIM)
+add_subdirectory(CPP)
add_subdirectory(OSUtil)
diff --git a/libc/test/utils/CPP/CMakeLists.txt b/libc/test/src/__support/CPP/CMakeLists.txt
index a02636a07abb..7d4f15b7af5c 100644
--- a/libc/test/utils/CPP/CMakeLists.txt
+++ b/libc/test/src/__support/CPP/CMakeLists.txt
@@ -40,14 +40,12 @@ add_libc_unittest(
libc.src.__support.CPP.standalone_cpp
)
-if(LLVM_LIBC_INCLUDE_SCUDO OR NOT LLVM_LIBC_FULL_BUILD)
- add_libc_unittest(
- vector_test
- SUITE
- libc_cpp_utils_unittests
- SRCS
- vector_test.cpp
- DEPENDS
- libc.src.__support.CPP.vector
- )
-endif()
+add_libc_unittest(
+ vector_test
+ SUITE
+ libc_cpp_utils_unittests
+ SRCS
+ vector_test.cpp
+ DEPENDS
+ libc.src.__support.CPP.vector
+)
diff --git a/libc/test/utils/CPP/arrayref_test.cpp b/libc/test/src/__support/CPP/arrayref_test.cpp
index 79466c7d2362..79466c7d2362 100644
--- a/libc/test/utils/CPP/arrayref_test.cpp
+++ b/libc/test/src/__support/CPP/arrayref_test.cpp
diff --git a/libc/test/utils/CPP/bitset_test.cpp b/libc/test/src/__support/CPP/bitset_test.cpp
index 4bcddad5715c..4bcddad5715c 100644
--- a/libc/test/utils/CPP/bitset_test.cpp
+++ b/libc/test/src/__support/CPP/bitset_test.cpp
diff --git a/libc/test/utils/CPP/limits_test.cpp b/libc/test/src/__support/CPP/limits_test.cpp
index fa0ae38cae6d..fa0ae38cae6d 100644
--- a/libc/test/utils/CPP/limits_test.cpp
+++ b/libc/test/src/__support/CPP/limits_test.cpp
diff --git a/libc/test/utils/CPP/stringview_test.cpp b/libc/test/src/__support/CPP/stringview_test.cpp
index a62cde9bfb81..a62cde9bfb81 100644
--- a/libc/test/utils/CPP/stringview_test.cpp
+++ b/libc/test/src/__support/CPP/stringview_test.cpp
diff --git a/libc/test/utils/CPP/vector_test.cpp b/libc/test/src/__support/CPP/vector_test.cpp
index a27eafa151bc..a27eafa151bc 100644
--- a/libc/test/utils/CPP/vector_test.cpp
+++ b/libc/test/src/__support/CPP/vector_test.cpp
diff --git a/libc/test/utils/CMakeLists.txt b/libc/test/utils/CMakeLists.txt
index ae00987eeda4..d98c401f18a8 100644
--- a/libc/test/utils/CMakeLists.txt
+++ b/libc/test/utils/CMakeLists.txt
@@ -1,5 +1,4 @@
add_subdirectory(FPUtil)
-add_subdirectory(CPP)
add_subdirectory(UnitTest)
if(NOT LLVM_LIBC_FULL_BUILD)
diff --git a/libcxx/cmake/caches/Generic-asan.cmake b/libcxx/cmake/caches/Generic-asan.cmake
index cf919765c3a2..a86b34748550 100644
--- a/libcxx/cmake/caches/Generic-asan.cmake
+++ b/libcxx/cmake/caches/Generic-asan.cmake
@@ -1 +1,3 @@
set(LLVM_USE_SANITIZER "Address" CACHE STRING "")
+# This is a temporary (hopefully) workaround for an ASan issue (see https://llvm.org/D119410).
+set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -mllvm -asan-use-private-alias=1" CACHE INTERNAL "")
diff --git a/libcxx/docs/Status/Cxx2bIssues.csv b/libcxx/docs/Status/Cxx2bIssues.csv
index 45fa9826ec0a..b806f8be79a7 100644
--- a/libcxx/docs/Status/Cxx2bIssues.csv
+++ b/libcxx/docs/Status/Cxx2bIssues.csv
@@ -151,10 +151,10 @@
"`3621 <https://wg21.link/LWG3621>`__","Remove feature-test macro ``__cpp_lib_monadic_optional`` ","February 2022","",""
"`3632 <https://wg21.link/LWG3632>`__","``unique_ptr`` ""Mandates: This constructor is not selected by class tmeplate argument deduction""","February 2022","|Nothing to do|",""
"`3643 <https://wg21.link/LWG3643>`__","Missing ``constexpr`` in ``std::counted_iterator`` ","February 2022","",""
-"`3648 <https://wg21.link/LWG3648>`__","``format`` should not print ``bool`` with ``'c'`` ","February 2022","","","|format|"
+"`3648 <https://wg21.link/LWG3648>`__","``format`` should not print ``bool`` with ``'c'`` ","February 2022","|Complete|","15.0","|format|"
"`3649 <https://wg21.link/LWG3649>`__","[fund.ts.v2] Reinstate and bump ``__cpp_lib_experimental_memory_resource`` feature test macro","February 2022","",""
"`3650 <https://wg21.link/LWG3650>`__","Are ``std::basic_string`` 's ``iterator`` and ``const_iterator`` constexpr iterators?","February 2022","|Nothing to do|",""
-"`3654 <https://wg21.link/LWG3654>`__","``basic_format_context::arg(size_t)`` should be ``noexcept`` ","February 2022","","","|format|"
+"`3654 <https://wg21.link/LWG3654>`__","``basic_format_context::arg(size_t)`` should be ``noexcept`` ","February 2022","|Complete|","15.0","|format|"
"`3657 <https://wg21.link/LWG3657>`__","``std::hash<std::filesystem::path>`` is not enabled","February 2022","",""
"`3660 <https://wg21.link/LWG3660>`__","``iterator_traits<common_iterator>::pointer`` should conform to §[iterator.traits]","February 2022","|Complete|","14.0"
"`3661 <https://wg21.link/LWG3661>`__","``constinit atomic<shared_ptr<T>> a (nullptr);`` should work","February 2022","",""
diff --git a/libcxx/include/CMakeLists.txt b/libcxx/include/CMakeLists.txt
index f2a85c717f84..40ada5c9ce9e 100644
--- a/libcxx/include/CMakeLists.txt
+++ b/libcxx/include/CMakeLists.txt
@@ -219,7 +219,6 @@ set(files
__functional/unary_negate.h
__functional/unwrap_ref.h
__functional/weak_result_type.h
- __functional_base
__hash_table
__ios/fpos.h
__iterator/access.h
diff --git a/libcxx/include/__algorithm/in_fun_result.h b/libcxx/include/__algorithm/in_fun_result.h
index db952aea701a..d5186e11b7ea 100644
--- a/libcxx/include/__algorithm/in_fun_result.h
+++ b/libcxx/include/__algorithm/in_fun_result.h
@@ -25,8 +25,8 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
template <class _Ip, class _Fp>
struct in_fun_result {
- [[no_unique_address]] _Ip in;
- [[no_unique_address]] _Fp fun;
+ _LIBCPP_NO_UNIQUE_ADDRESS _Ip in;
+ _LIBCPP_NO_UNIQUE_ADDRESS _Fp fun;
template <class _I2, class _F2>
requires convertible_to<const _Ip&, _I2> && convertible_to<const _Fp&, _F2>
diff --git a/libcxx/include/__algorithm/in_in_out_result.h b/libcxx/include/__algorithm/in_in_out_result.h
index d020b918ac73..3a163d1a75b6 100644
--- a/libcxx/include/__algorithm/in_in_out_result.h
+++ b/libcxx/include/__algorithm/in_in_out_result.h
@@ -26,9 +26,9 @@ namespace ranges {
template <class _I1, class _I2, class _O1>
struct in_in_out_result {
- [[no_unique_address]] _I1 in1;
- [[no_unique_address]] _I2 in2;
- [[no_unique_address]] _O1 out;
+ _LIBCPP_NO_UNIQUE_ADDRESS _I1 in1;
+ _LIBCPP_NO_UNIQUE_ADDRESS _I2 in2;
+ _LIBCPP_NO_UNIQUE_ADDRESS _O1 out;
template <class _II1, class _II2, class _OO1>
requires convertible_to<const _I1&, _II1> && convertible_to<const _I2&, _II2> && convertible_to<const _O1&, _OO1>
diff --git a/libcxx/include/__algorithm/in_in_result.h b/libcxx/include/__algorithm/in_in_result.h
index e2585a709c1c..159092189a53 100644
--- a/libcxx/include/__algorithm/in_in_result.h
+++ b/libcxx/include/__algorithm/in_in_result.h
@@ -26,8 +26,8 @@ namespace ranges {
template <class _I1, class _I2>
struct in_in_result {
- [[no_unique_address]] _I1 in1;
- [[no_unique_address]] _I2 in2;
+ _LIBCPP_NO_UNIQUE_ADDRESS _I1 in1;
+ _LIBCPP_NO_UNIQUE_ADDRESS _I2 in2;
template <class _II1, class _II2>
requires convertible_to<const _I1&, _II1> && convertible_to<const _I2&, _II2>
diff --git a/libcxx/include/__algorithm/in_out_out_result.h b/libcxx/include/__algorithm/in_out_out_result.h
index e97a89147378..9788fbfa2e74 100644
--- a/libcxx/include/__algorithm/in_out_out_result.h
+++ b/libcxx/include/__algorithm/in_out_out_result.h
@@ -25,9 +25,9 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
template <class _I1, class _O1, class _O2>
struct in_out_out_result {
- [[no_unique_address]] _I1 in;
- [[no_unique_address]] _O1 out1;
- [[no_unique_address]] _O2 out2;
+ _LIBCPP_NO_UNIQUE_ADDRESS _I1 in;
+ _LIBCPP_NO_UNIQUE_ADDRESS _O1 out1;
+ _LIBCPP_NO_UNIQUE_ADDRESS _O2 out2;
template <class _II1, class _OO1, class _OO2>
requires convertible_to<const _I1&, _II1> && convertible_to<const _O1&, _OO1> && convertible_to<const _O2&, _OO2>
diff --git a/libcxx/include/__algorithm/in_out_result.h b/libcxx/include/__algorithm/in_out_result.h
index cdd6ab48fbd2..124b4d6ffb6c 100644
--- a/libcxx/include/__algorithm/in_out_result.h
+++ b/libcxx/include/__algorithm/in_out_result.h
@@ -26,8 +26,8 @@ namespace ranges {
template<class _InputIterator, class _OutputIterator>
struct in_out_result {
- [[no_unique_address]] _InputIterator in;
- [[no_unique_address]] _OutputIterator out;
+ _LIBCPP_NO_UNIQUE_ADDRESS _InputIterator in;
+ _LIBCPP_NO_UNIQUE_ADDRESS _OutputIterator out;
template <class _InputIterator2, class _OutputIterator2>
requires convertible_to<const _InputIterator&, _InputIterator2> && convertible_to<const _OutputIterator&,
diff --git a/libcxx/include/__algorithm/ranges_swap_ranges.h b/libcxx/include/__algorithm/ranges_swap_ranges.h
index cced873836db..59a875ae3bb2 100644
--- a/libcxx/include/__algorithm/ranges_swap_ranges.h
+++ b/libcxx/include/__algorithm/ranges_swap_ranges.h
@@ -17,7 +17,6 @@
#include <__ranges/concepts.h>
#include <__ranges/dangling.h>
#include <__utility/move.h>
-#include <type_traits>
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
diff --git a/libcxx/include/__config b/libcxx/include/__config
index 2f0e8d4cf81e..e455eacc1948 100644
--- a/libcxx/include/__config
+++ b/libcxx/include/__config
@@ -1398,6 +1398,28 @@ extern "C" _LIBCPP_FUNC_VIS void __sanitizer_annotate_contiguous_container(
# define _LIBCPP_ATTRIBUTE_FORMAT(archetype, format_string_index, first_format_arg_index) /* nothing */
#endif
+#if __has_cpp_attribute(msvc::no_unique_address)
+ // MSVC implements [[no_unique_address]] as a silent no-op currently.
+ // (If/when MSVC breaks its C++ ABI, it will be changed to work as intended.)
+ // However, MSVC implements [[msvc::no_unique_address]] which does what
+ // [[no_unique_address]] is supposed to do, in general.
+
+ // Clang-cl does not yet (as of 14.0) implement either [[no_unique_address]]
+ // or [[msvc::no_unique_address]]. If/when it implements
+ // [[msvc::no_unique_address]], that attribute should be preferred.
+# define _LIBCPP_NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]
+#elif __has_cpp_attribute(no_unique_address)
+# define _LIBCPP_NO_UNIQUE_ADDRESS [[no_unique_address]]
+#else
+# define _LIBCPP_NO_UNIQUE_ADDRESS /* nothing */
+ // Note that this can be replaced by #error as soon as clang-cl
+ // implements msvc::no_unique_address, since there should be no C++20
+ // compiler that doesn't support one of the two attributes at that point.
+ // We generally don't want to use this macro outside of C++20-only code,
+ // because using it conditionally in one language version only would make
+ // the ABI inconsistent.
+#endif
+
#endif // __cplusplus
#endif // _LIBCPP_CONFIG
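
For illustration only (not part of the patch), the attribute-selection pattern that the new _LIBCPP_NO_UNIQUE_ADDRESS macro encodes can be reproduced in a standalone C++20 file; the NO_UNIQUE_ADDRESS spelling below is a stand-in so the sketch builds without libc++ internals:

    // sketch.cpp, compile as C++20.
    #if __has_cpp_attribute(msvc::no_unique_address)
    #  define NO_UNIQUE_ADDRESS [[msvc::no_unique_address]]  // MSVC's working spelling
    #elif __has_cpp_attribute(no_unique_address)
    #  define NO_UNIQUE_ADDRESS [[no_unique_address]]        // standard C++20 spelling
    #else
    #  define NO_UNIQUE_ADDRESS                              // degrade silently (ABI caveat)
    #endif

    struct Empty {};

    struct Holder {
      NO_UNIQUE_ADDRESS Empty tag;  // may share storage with `value` when the attribute is honored
      int value;
    };

    int main() {
      // With a working attribute, sizeof(Holder) is typically sizeof(int);
      // with the no-op fallback, the empty member costs extra padding.
      return sizeof(Holder) == sizeof(int) ? 0 : 1;
    }
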
diff --git a/libcxx/include/__format/format_arg.h b/libcxx/include/__format/format_arg.h
index fa8a41e1723d..df08e93d2ee3 100644
--- a/libcxx/include/__format/format_arg.h
+++ b/libcxx/include/__format/format_arg.h
@@ -15,7 +15,6 @@
#include <__format/format_error.h>
#include <__format/format_fwd.h>
#include <__format/format_parse_context.h>
-#include <__functional_base>
#include <__memory/addressof.h>
#include <__variant/monostate.h>
#include <string>
diff --git a/libcxx/include/__format/format_context.h b/libcxx/include/__format/format_context.h
index 705dbd54db02..d4bc35decda6 100644
--- a/libcxx/include/__format/format_context.h
+++ b/libcxx/include/__format/format_context.h
@@ -98,7 +98,7 @@ public:
basic_format_context& operator=(const basic_format_context&) = delete;
_LIBCPP_HIDE_FROM_ABI basic_format_arg<basic_format_context>
- arg(size_t __id) const {
+ arg(size_t __id) const noexcept {
return __args_.get(__id);
}
#ifndef _LIBCPP_HAS_NO_LOCALIZATION
diff --git a/libcxx/include/__format/formatter_bool.h b/libcxx/include/__format/formatter_bool.h
index 52d5224a15a1..f42868365771 100644
--- a/libcxx/include/__format/formatter_bool.h
+++ b/libcxx/include/__format/formatter_bool.h
@@ -54,10 +54,6 @@ public:
this->__handle_bool();
break;
- case _Flags::_Type::__char:
- this->__handle_char();
- break;
-
case _Flags::_Type::__binary_lower_case:
case _Flags::_Type::__binary_upper_case:
case _Flags::_Type::__octal:
diff --git a/libcxx/include/__functional_base b/libcxx/include/__functional_base
deleted file mode 100644
index caa3d7fd86ce..000000000000
--- a/libcxx/include/__functional_base
+++ /dev/null
@@ -1,32 +0,0 @@
-// -*- C++ -*-
-//===----------------------------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef _LIBCPP_FUNCTIONAL_BASE
-#define _LIBCPP_FUNCTIONAL_BASE
-
-#include <__config>
-#include <__functional/binary_function.h>
-#include <__functional/invoke.h>
-#include <__functional/operations.h>
-#include <__functional/reference_wrapper.h>
-#include <__functional/unary_function.h>
-#include <__functional/weak_result_type.h>
-#include <__memory/allocator_arg_t.h>
-#include <__memory/uses_allocator.h>
-#include <exception>
-#include <new>
-#include <type_traits>
-#include <typeinfo>
-#include <utility>
-
-#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
-# pragma GCC system_header
-#endif
-
-#endif // _LIBCPP_FUNCTIONAL_BASE
diff --git a/libcxx/include/__iterator/counted_iterator.h b/libcxx/include/__iterator/counted_iterator.h
index 29de729e5ab1..c67743fbe37f 100644
--- a/libcxx/include/__iterator/counted_iterator.h
+++ b/libcxx/include/__iterator/counted_iterator.h
@@ -65,7 +65,7 @@ class counted_iterator
, public __counted_iterator_value_type<_Iter>
{
public:
- [[no_unique_address]] _Iter __current_ = _Iter();
+ _LIBCPP_NO_UNIQUE_ADDRESS _Iter __current_ = _Iter();
iter_difference_t<_Iter> __count_ = 0;
using iterator_type = _Iter;
diff --git a/libcxx/include/__memory/shared_ptr.h b/libcxx/include/__memory/shared_ptr.h
index a1a1b26be829..0f28bcdfac11 100644
--- a/libcxx/include/__memory/shared_ptr.h
+++ b/libcxx/include/__memory/shared_ptr.h
@@ -15,7 +15,6 @@
#include <__functional/binary_function.h>
#include <__functional/operations.h>
#include <__functional/reference_wrapper.h>
-#include <__functional_base>
#include <__memory/addressof.h>
#include <__memory/allocation_guard.h>
#include <__memory/allocator.h>
diff --git a/libcxx/include/__memory/unique_ptr.h b/libcxx/include/__memory/unique_ptr.h
index 94c1704ea5f1..8b330508511a 100644
--- a/libcxx/include/__memory/unique_ptr.h
+++ b/libcxx/include/__memory/unique_ptr.h
@@ -13,7 +13,6 @@
#include <__config>
#include <__functional/hash.h>
#include <__functional/operations.h>
-#include <__functional_base>
#include <__memory/allocator_traits.h> // __pointer
#include <__memory/compressed_pair.h>
#include <__utility/forward.h>
diff --git a/libcxx/include/__ranges/access.h b/libcxx/include/__ranges/access.h
index 168e0304f509..5b623c1e4a2a 100644
--- a/libcxx/include/__ranges/access.h
+++ b/libcxx/include/__ranges/access.h
@@ -14,7 +14,6 @@
#include <__iterator/concepts.h>
#include <__iterator/readable_traits.h>
#include <__ranges/enable_borrowed_range.h>
-#include <__utility/as_const.h>
#include <__utility/auto_cast.h>
#include <concepts>
#include <type_traits>
diff --git a/libcxx/include/__ranges/copyable_box.h b/libcxx/include/__ranges/copyable_box.h
index 8d43b8884565..d092b7f9ba29 100644
--- a/libcxx/include/__ranges/copyable_box.h
+++ b/libcxx/include/__ranges/copyable_box.h
@@ -41,7 +41,7 @@ namespace ranges {
// Primary template - uses std::optional and introduces an empty state in case assignment fails.
template<__copy_constructible_object _Tp>
class __copyable_box {
- [[no_unique_address]] optional<_Tp> __val_;
+ _LIBCPP_NO_UNIQUE_ADDRESS optional<_Tp> __val_;
public:
template<class ..._Args>
@@ -116,7 +116,7 @@ namespace ranges {
template<__copy_constructible_object _Tp>
requires __doesnt_need_empty_state_for_copy<_Tp> && __doesnt_need_empty_state_for_move<_Tp>
class __copyable_box<_Tp> {
- [[no_unique_address]] _Tp __val_;
+ _LIBCPP_NO_UNIQUE_ADDRESS _Tp __val_;
public:
template<class ..._Args>
diff --git a/libcxx/include/__ranges/drop_view.h b/libcxx/include/__ranges/drop_view.h
index f3701bc71130..64c567416453 100644
--- a/libcxx/include/__ranges/drop_view.h
+++ b/libcxx/include/__ranges/drop_view.h
@@ -45,7 +45,7 @@ namespace ranges {
// one can't call begin() on it more than once.
static constexpr bool _UseCache = forward_range<_View> && !(random_access_range<_View> && sized_range<_View>);
using _Cache = _If<_UseCache, __non_propagating_cache<iterator_t<_View>>, __empty_cache>;
- [[no_unique_address]] _Cache __cached_begin_ = _Cache();
+ _LIBCPP_NO_UNIQUE_ADDRESS _Cache __cached_begin_ = _Cache();
range_difference_t<_View> __count_ = 0;
_View __base_ = _View();
diff --git a/libcxx/include/__ranges/join_view.h b/libcxx/include/__ranges/join_view.h
index 1472924bdb80..4bab8dfeec43 100644
--- a/libcxx/include/__ranges/join_view.h
+++ b/libcxx/include/__ranges/join_view.h
@@ -67,8 +67,8 @@ namespace ranges {
static constexpr bool _UseCache = !is_reference_v<_InnerRange>;
using _Cache = _If<_UseCache, __non_propagating_cache<remove_cvref_t<_InnerRange>>, __empty_cache>;
- [[no_unique_address]] _Cache __cache_;
- _View __base_ = _View(); // TODO: [[no_unique_address]] makes clang crash! File a bug :)
+ _LIBCPP_NO_UNIQUE_ADDRESS _Cache __cache_;
+ _LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
public:
_LIBCPP_HIDE_FROM_ABI
diff --git a/libcxx/include/__ranges/reverse_view.h b/libcxx/include/__ranges/reverse_view.h
index 4da16133088f..c7f1ab8f943b 100644
--- a/libcxx/include/__ranges/reverse_view.h
+++ b/libcxx/include/__ranges/reverse_view.h
@@ -43,8 +43,8 @@ namespace ranges {
// amortized O(1) begin() method.
static constexpr bool _UseCache = !random_access_range<_View> && !common_range<_View>;
using _Cache = _If<_UseCache, __non_propagating_cache<reverse_iterator<iterator_t<_View>>>, __empty_cache>;
- [[no_unique_address]] _Cache __cached_begin_ = _Cache();
- [[no_unique_address]] _View __base_ = _View();
+ _LIBCPP_NO_UNIQUE_ADDRESS _Cache __cached_begin_ = _Cache();
+ _LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
public:
_LIBCPP_HIDE_FROM_ABI
diff --git a/libcxx/include/__ranges/subrange.h b/libcxx/include/__ranges/subrange.h
index 0ea9d7887590..2450e230f281 100644
--- a/libcxx/include/__ranges/subrange.h
+++ b/libcxx/include/__ranges/subrange.h
@@ -82,9 +82,9 @@ namespace ranges {
static constexpr bool _MustProvideSizeAtConstruction = !_StoreSize; // just to improve compiler diagnostics
struct _Empty { constexpr _Empty(auto) noexcept { } };
using _Size = conditional_t<_StoreSize, make_unsigned_t<iter_difference_t<_Iter>>, _Empty>;
- [[no_unique_address]] _Iter __begin_ = _Iter();
- [[no_unique_address]] _Sent __end_ = _Sent();
- [[no_unique_address]] _Size __size_ = 0;
+ _LIBCPP_NO_UNIQUE_ADDRESS _Iter __begin_ = _Iter();
+ _LIBCPP_NO_UNIQUE_ADDRESS _Sent __end_ = _Sent();
+ _LIBCPP_NO_UNIQUE_ADDRESS _Size __size_ = 0;
public:
_LIBCPP_HIDE_FROM_ABI
diff --git a/libcxx/include/__ranges/take_view.h b/libcxx/include/__ranges/take_view.h
index 5f8fbaa0bd8b..de44fc1fae72 100644
--- a/libcxx/include/__ranges/take_view.h
+++ b/libcxx/include/__ranges/take_view.h
@@ -39,7 +39,7 @@ _LIBCPP_BEGIN_NAMESPACE_STD
namespace ranges {
template<view _View>
class take_view : public view_interface<take_view<_View>> {
- [[no_unique_address]] _View __base_ = _View();
+ _LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
range_difference_t<_View> __count_ = 0;
template<bool> class __sentinel;
@@ -136,7 +136,7 @@ namespace ranges {
using _Base = __maybe_const<_Const, _View>;
template<bool _OtherConst>
using _Iter = counted_iterator<iterator_t<__maybe_const<_OtherConst, _View>>>;
- [[no_unique_address]] sentinel_t<_Base> __end_ = sentinel_t<_Base>();
+ _LIBCPP_NO_UNIQUE_ADDRESS sentinel_t<_Base> __end_ = sentinel_t<_Base>();
template<bool>
friend class take_view<_View>::__sentinel;
diff --git a/libcxx/include/__ranges/transform_view.h b/libcxx/include/__ranges/transform_view.h
index a8d2ea3f0ed2..dad46536c75f 100644
--- a/libcxx/include/__ranges/transform_view.h
+++ b/libcxx/include/__ranges/transform_view.h
@@ -61,8 +61,8 @@ class transform_view : public view_interface<transform_view<_View, _Fn>> {
template<bool> class __iterator;
template<bool> class __sentinel;
- [[no_unique_address]] __copyable_box<_Fn> __func_;
- [[no_unique_address]] _View __base_ = _View();
+ _LIBCPP_NO_UNIQUE_ADDRESS __copyable_box<_Fn> __func_;
+ _LIBCPP_NO_UNIQUE_ADDRESS _View __base_ = _View();
public:
_LIBCPP_HIDE_FROM_ABI
diff --git a/libcxx/include/__threading_support b/libcxx/include/__threading_support
index 6a972f89ee7c..11899e6b6e4a 100644
--- a/libcxx/include/__threading_support
+++ b/libcxx/include/__threading_support
@@ -254,7 +254,6 @@ int __libcpp_tls_set(__libcpp_tls_key __key, void *__p);
#if defined(_LIBCPP_HAS_THREAD_API_PTHREAD)
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m)
{
pthread_mutexattr_t attr;
@@ -279,88 +278,74 @@ int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m)
return 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m)
{
return pthread_mutex_lock(__m);
}
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m)
{
return pthread_mutex_trylock(__m) == 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_unlock(__libcpp_recursive_mutex_t *__m)
{
return pthread_mutex_unlock(__m);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m)
{
return pthread_mutex_destroy(__m);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_lock(__libcpp_mutex_t *__m)
{
return pthread_mutex_lock(__m);
}
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m)
{
return pthread_mutex_trylock(__m) == 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_unlock(__libcpp_mutex_t *__m)
{
return pthread_mutex_unlock(__m);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_destroy(__libcpp_mutex_t *__m)
{
return pthread_mutex_destroy(__m);
}
// Condition Variable
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_signal(__libcpp_condvar_t *__cv)
{
return pthread_cond_signal(__cv);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_broadcast(__libcpp_condvar_t *__cv)
{
return pthread_cond_broadcast(__cv);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_wait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m)
{
return pthread_cond_wait(__cv, __m);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m,
__libcpp_timespec_t *__ts)
{
return pthread_cond_timedwait(__cv, __m, __ts);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_destroy(__libcpp_condvar_t *__cv)
{
return pthread_cond_destroy(__cv);
}
// Execute once
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_execute_once(__libcpp_exec_once_flag *flag,
void (*init_routine)()) {
return pthread_once(flag, init_routine);
@@ -368,40 +353,34 @@ int __libcpp_execute_once(__libcpp_exec_once_flag *flag,
// Thread id
// Returns non-zero if the thread ids are equal, otherwise 0
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_id_equal(__libcpp_thread_id t1, __libcpp_thread_id t2)
{
return t1 == t2;
}
// Returns non-zero if t1 < t2, otherwise 0
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2)
{
return t1 < t2;
}
// Thread
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_isnull(const __libcpp_thread_t *__t) {
return __libcpp_thread_get_id(__t) == 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *),
void *__arg)
{
return pthread_create(__t, nullptr, __func, __arg);
}
-_LIBCPP_HIDE_FROM_ABI inline
__libcpp_thread_id __libcpp_thread_get_current_id()
{
const __libcpp_thread_t thread = pthread_self();
return __libcpp_thread_get_id(&thread);
}
-_LIBCPP_HIDE_FROM_ABI inline
__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t)
{
#if defined(__MVS__)
@@ -411,25 +390,21 @@ __libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t)
#endif
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_join(__libcpp_thread_t *__t)
{
return pthread_join(*__t, nullptr);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_detach(__libcpp_thread_t *__t)
{
return pthread_detach(*__t);
}
-_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_yield()
{
sched_yield();
}
-_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
{
__libcpp_timespec_t __ts = _VSTD::__convert_to_timespec<__libcpp_timespec_t>(__ns);
@@ -437,19 +412,16 @@ void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
}
// Thread local storage
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_tls_create(__libcpp_tls_key *__key, void (*__at_exit)(void *))
{
return pthread_key_create(__key, __at_exit);
}
-_LIBCPP_HIDE_FROM_ABI inline
void *__libcpp_tls_get(__libcpp_tls_key __key)
{
return pthread_getspecific(__key);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_tls_set(__libcpp_tls_key __key, void *__p)
{
return pthread_setspecific(__key, __p);
@@ -457,56 +429,47 @@ int __libcpp_tls_set(__libcpp_tls_key __key, void *__p)
#elif defined(_LIBCPP_HAS_THREAD_API_C11)
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_init(__libcpp_recursive_mutex_t *__m)
{
return mtx_init(__m, mtx_plain | mtx_recursive) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_lock(__libcpp_recursive_mutex_t *__m)
{
return mtx_lock(__m) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_recursive_mutex_trylock(__libcpp_recursive_mutex_t *__m)
{
return mtx_trylock(__m) == thrd_success;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_unlock(__libcpp_recursive_mutex_t *__m)
{
return mtx_unlock(__m) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_recursive_mutex_destroy(__libcpp_recursive_mutex_t *__m)
{
mtx_destroy(__m);
return 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_lock(__libcpp_mutex_t *__m)
{
return mtx_lock(__m) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_mutex_trylock(__libcpp_mutex_t *__m)
{
return mtx_trylock(__m) == thrd_success;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_unlock(__libcpp_mutex_t *__m)
{
return mtx_unlock(__m) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_mutex_destroy(__libcpp_mutex_t *__m)
{
mtx_destroy(__m);
@@ -514,25 +477,21 @@ int __libcpp_mutex_destroy(__libcpp_mutex_t *__m)
}
// Condition Variable
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_signal(__libcpp_condvar_t *__cv)
{
return cnd_signal(__cv) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_broadcast(__libcpp_condvar_t *__cv)
{
return cnd_broadcast(__cv) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_wait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m)
{
return cnd_wait(__cv, __m) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m,
timespec *__ts)
{
@@ -540,7 +499,6 @@ int __libcpp_condvar_timedwait(__libcpp_condvar_t *__cv, __libcpp_mutex_t *__m,
return __ec == thrd_timedout ? ETIMEDOUT : __ec;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_condvar_destroy(__libcpp_condvar_t *__cv)
{
cnd_destroy(__cv);
@@ -548,7 +506,6 @@ int __libcpp_condvar_destroy(__libcpp_condvar_t *__cv)
}
// Execute once
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_execute_once(__libcpp_exec_once_flag *flag,
void (*init_routine)(void)) {
::call_once(flag, init_routine);
@@ -557,26 +514,22 @@ int __libcpp_execute_once(__libcpp_exec_once_flag *flag,
// Thread id
// Returns non-zero if the thread ids are equal, otherwise 0
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_id_equal(__libcpp_thread_id t1, __libcpp_thread_id t2)
{
return thrd_equal(t1, t2) != 0;
}
// Returns non-zero if t1 < t2, otherwise 0
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_id_less(__libcpp_thread_id t1, __libcpp_thread_id t2)
{
return t1 < t2;
}
// Thread
-_LIBCPP_HIDE_FROM_ABI inline
bool __libcpp_thread_isnull(const __libcpp_thread_t *__t) {
return __libcpp_thread_get_id(__t) == 0;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *),
void *__arg)
{
@@ -584,37 +537,31 @@ int __libcpp_thread_create(__libcpp_thread_t *__t, void *(*__func)(void *),
return __ec == thrd_nomem ? ENOMEM : __ec;
}
-_LIBCPP_HIDE_FROM_ABI inline
__libcpp_thread_id __libcpp_thread_get_current_id()
{
return thrd_current();
}
-_LIBCPP_HIDE_FROM_ABI inline
__libcpp_thread_id __libcpp_thread_get_id(const __libcpp_thread_t *__t)
{
return *__t;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_join(__libcpp_thread_t *__t)
{
return thrd_join(*__t, nullptr) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_thread_detach(__libcpp_thread_t *__t)
{
return thrd_detach(*__t) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_yield()
{
thrd_yield();
}
-_LIBCPP_HIDE_FROM_ABI inline
void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
{
__libcpp_timespec_t __ts = _VSTD::__convert_to_timespec<__libcpp_timespec_t>(__ns);
@@ -622,19 +569,16 @@ void __libcpp_thread_sleep_for(const chrono::nanoseconds& __ns)
}
// Thread local storage
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_tls_create(__libcpp_tls_key *__key, void (*__at_exit)(void *))
{
return tss_create(__key, __at_exit) == thrd_success ? 0 : EINVAL;
}
-_LIBCPP_HIDE_FROM_ABI inline
void *__libcpp_tls_get(__libcpp_tls_key __key)
{
return tss_get(__key);
}
-_LIBCPP_HIDE_FROM_ABI inline
int __libcpp_tls_set(__libcpp_tls_key __key, void *__p)
{
return tss_set(__key, __p) == thrd_success ? 0 : EINVAL;
diff --git a/libcxx/include/bitset b/libcxx/include/bitset
index cae001a60ec5..fa37e701345b 100644
--- a/libcxx/include/bitset
+++ b/libcxx/include/bitset
@@ -114,7 +114,6 @@ template <size_t N> struct hash<std::bitset<N>>;
#include <__bit_reference>
#include <__config>
-#include <__functional_base>
#include <climits>
#include <cstddef>
#include <iosfwd>
diff --git a/libcxx/include/experimental/__memory b/libcxx/include/experimental/__memory
index 40021691d5ae..749cf4c0c657 100644
--- a/libcxx/include/experimental/__memory
+++ b/libcxx/include/experimental/__memory
@@ -10,7 +10,6 @@
#ifndef _LIBCPP_EXPERIMENTAL___MEMORY
#define _LIBCPP_EXPERIMENTAL___MEMORY
-#include <__functional_base>
#include <__memory/allocator_arg_t.h>
#include <__memory/uses_allocator.h>
#include <experimental/__config>
diff --git a/libcxx/include/iterator b/libcxx/include/iterator
index da920f3d1c0e..202667809dca 100644
--- a/libcxx/include/iterator
+++ b/libcxx/include/iterator
@@ -598,7 +598,6 @@ template <class E> constexpr const E* data(initializer_list<E> il) noexcept;
#include <__config>
#include <__debug>
-#include <__functional_base>
#include <__iterator/access.h>
#include <__iterator/advance.h>
#include <__iterator/back_insert_iterator.h>
@@ -643,6 +642,19 @@ template <class E> constexpr const E* data(initializer_list<E> il) noexcept;
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/unary_function.h>
+#include <__functional/weak_result_type.h>
+#include <__memory/allocator_arg_t.h>
+#include <__memory/uses_allocator.h>
+#include <exception>
+#include <new>
+#include <typeinfo>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/memory b/libcxx/include/memory
index 0d3f5a555573..3bc31d29ed6c 100644
--- a/libcxx/include/memory
+++ b/libcxx/include/memory
@@ -805,7 +805,6 @@ void* align(size_t alignment, size_t size, void*& ptr, size_t& space);
*/
#include <__config>
-#include <__functional_base>
#include <__memory/addressof.h>
#include <__memory/allocation_guard.h>
#include <__memory/allocator.h>
@@ -837,6 +836,14 @@ void* align(size_t alignment, size_t size, void*& ptr, size_t& space);
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/unary_function.h>
+#include <__functional/weak_result_type.h>
+
#if _LIBCPP_STD_VER <= 14 || defined(_LIBCPP_ENABLE_CXX17_REMOVED_AUTO_PTR)
# include <__memory/auto_ptr.h>
#endif
diff --git a/libcxx/include/module.modulemap b/libcxx/include/module.modulemap
index 5c21e5dba4b2..6a55bf3f42d7 100644
--- a/libcxx/include/module.modulemap
+++ b/libcxx/include/module.modulemap
@@ -2,8 +2,9 @@
// since __config may be included from C headers which may create an
// include cycle.
module std_config [system] [extern_c] {
- textual header "__config"
- textual header "__config_site"
+ header "__config"
+ header "__config_site"
+ export *
}
module std [system] {
diff --git a/libcxx/include/optional b/libcxx/include/optional
index 236842d627d9..1599efdd3206 100644
--- a/libcxx/include/optional
+++ b/libcxx/include/optional
@@ -162,7 +162,6 @@ template<class T>
#include <__concepts/invocable.h>
#include <__config>
#include <__debug>
-#include <__functional_base>
#include <compare>
#include <functional>
#include <initializer_list>
@@ -172,6 +171,11 @@ template<class T>
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__memory/allocator_arg_t.h>
+#include <__memory/uses_allocator.h>
+#include <typeinfo>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/string b/libcxx/include/string
index 4f3815f53e7b..fa42edde0aa1 100644
--- a/libcxx/include/string
+++ b/libcxx/include/string
@@ -520,7 +520,6 @@ basic_string<char32_t> operator "" s( const char32_t *str, size_t len ); // C++1
#include <__config>
#include <__debug>
-#include <__functional_base>
#include <__ios/fpos.h>
#include <__iterator/wrap_iter.h>
#include <algorithm>
@@ -538,6 +537,16 @@ basic_string<char32_t> operator "" s( const char32_t *str, size_t len ); // C++1
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/unary_function.h>
+#include <__functional/weak_result_type.h>
+#include <new>
+#include <typeinfo>
+
#ifndef _LIBCPP_HAS_NO_WIDE_CHARACTERS
# include <cwchar>
#endif
diff --git a/libcxx/include/system_error b/libcxx/include/system_error
index 78dddc93059e..66a3f3c1e0b7 100644
--- a/libcxx/include/system_error
+++ b/libcxx/include/system_error
@@ -145,7 +145,6 @@ template <> struct hash<std::error_condition>;
#include <__config>
#include <__errc>
#include <__functional/unary_function.h>
-#include <__functional_base>
#include <compare>
#include <stdexcept>
#include <string>
diff --git a/libcxx/include/thread b/libcxx/include/thread
index 8af1e0f99473..5681f9d97386 100644
--- a/libcxx/include/thread
+++ b/libcxx/include/thread
@@ -84,7 +84,6 @@ void sleep_for(const chrono::duration<Rep, Period>& rel_time);
#include <__config>
#include <__debug>
-#include <__functional_base>
#include <__mutex_base>
#include <__thread/poll_with_backoff.h>
#include <__thread/timed_backoff_policy.h>
diff --git a/libcxx/include/tuple b/libcxx/include/tuple
index 046a242befdc..a28a2e81db3e 100644
--- a/libcxx/include/tuple
+++ b/libcxx/include/tuple
@@ -169,7 +169,6 @@ template <class... Types>
#include <__compare/synth_three_way.h>
#include <__config>
#include <__functional/unwrap_ref.h>
-#include <__functional_base>
#include <__memory/allocator_arg_t.h>
#include <__memory/uses_allocator.h>
#include <__tuple>
@@ -182,6 +181,17 @@ template <class... Types>
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/unary_function.h>
+#include <__functional/weak_result_type.h>
+#include <exception>
+#include <new>
+#include <typeinfo>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/typeindex b/libcxx/include/typeindex
index c018b3c9f874..b5dcd8496ae0 100644
--- a/libcxx/include/typeindex
+++ b/libcxx/include/typeindex
@@ -46,11 +46,21 @@ struct hash<type_index>
#include <__config>
#include <__functional/unary_function.h>
-#include <__functional_base>
#include <compare>
#include <typeinfo>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/weak_result_type.h>
+#include <__memory/allocator_arg_t.h>
+#include <__memory/uses_allocator.h>
+#include <new>
+#include <utility>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/variant b/libcxx/include/variant
index 38daf672c41d..d102196f60a2 100644
--- a/libcxx/include/variant
+++ b/libcxx/include/variant
@@ -202,6 +202,8 @@ namespace std {
#include <__availability>
#include <__config>
#include <__functional/hash.h>
+#include <__functional/operations.h>
+#include <__functional/unary_function.h>
#include <__tuple>
#include <__utility/forward.h>
#include <__variant/monostate.h>
@@ -215,6 +217,15 @@ namespace std {
#include <utility>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/weak_result_type.h>
+#include <__memory/allocator_arg_t.h>
+#include <__memory/uses_allocator.h>
+#include <typeinfo>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/include/vector b/libcxx/include/vector
index f92caf941e8d..43584e0675e3 100644
--- a/libcxx/include/vector
+++ b/libcxx/include/vector
@@ -274,7 +274,6 @@ erase_if(vector<T, Allocator>& c, Predicate pred); // C++20
#include <__bit_reference>
#include <__config>
#include <__debug>
-#include <__functional_base>
#include <__iterator/iterator_traits.h>
#include <__iterator/wrap_iter.h>
#include <__split_buffer>
@@ -292,6 +291,16 @@ erase_if(vector<T, Allocator>& c, Predicate pred); // C++20
#include <type_traits>
#include <version>
+// TODO: remove these headers
+#include <__functional/binary_function.h>
+#include <__functional/invoke.h>
+#include <__functional/operations.h>
+#include <__functional/reference_wrapper.h>
+#include <__functional/unary_function.h>
+#include <__functional/weak_result_type.h>
+#include <typeinfo>
+#include <utility>
+
#if !defined(_LIBCPP_HAS_NO_PRAGMA_SYSTEM_HEADER)
# pragma GCC system_header
#endif
diff --git a/libcxx/test/libcxx/utilities/format/format.string/format.string.std/std_format_spec_bool.pass.cpp b/libcxx/test/libcxx/utilities/format/format.string/format.string.std/std_format_spec_bool.pass.cpp
index 6d233f80ac53..772cffbc33d4 100644
--- a/libcxx/test/libcxx/utilities/format/format.string/format.string.std/std_format_spec_bool.pass.cpp
+++ b/libcxx/test/libcxx/utilities/format/format.string/format.string.std/std_format_spec_bool.pass.cpp
@@ -197,76 +197,6 @@ constexpr void test_as_string() {
}
template <class CharT>
-constexpr void test_as_char() {
-
- test({.type = _Flags::_Type::__char}, 1, CSTR("c}"));
-
- // *** Align-fill ***
- test({.alignment = _Flags::_Alignment::__left, .type = _Flags::_Type::__char},
- 2, CSTR("<c}"));
- test({.alignment = _Flags::_Alignment::__center,
- .type = _Flags::_Type::__char},
- 2, "^c}");
- test(
- {.alignment = _Flags::_Alignment::__right, .type = _Flags::_Type::__char},
- 2, ">c}");
-
- test({.fill = CharT('L'),
- .alignment = _Flags::_Alignment::__left,
- .type = _Flags::_Type::__char},
- 3, CSTR("L<c}"));
- test({.fill = CharT('#'),
- .alignment = _Flags::_Alignment::__center,
- .type = _Flags::_Type::__char},
- 3, CSTR("#^c}"));
- test({.fill = CharT('0'),
- .alignment = _Flags::_Alignment::__right,
- .type = _Flags::_Type::__char},
- 3, CSTR("0>c}"));
-
- // *** Sign ***
- test_exception<Parser<CharT>>(
- "A sign field isn't allowed in this format-spec", CSTR("-c}"));
-
- // *** Alternate form ***
- test_exception<Parser<CharT>>(
- "An alternate form field isn't allowed in this format-spec", CSTR("#c}"));
-
- // *** Zero padding ***
- test_exception<Parser<CharT>>(
- "A zero-padding field isn't allowed in this format-spec", CSTR("0c}"));
-
- // *** Width ***
- test({.width = 0, .width_as_arg = false, .type = _Flags::_Type::__char}, 1,
- CSTR("c}"));
- test({.width = 1, .width_as_arg = false, .type = _Flags::_Type::__char}, 2,
- CSTR("1c}"));
- test({.width = 10, .width_as_arg = false, .type = _Flags::_Type::__char}, 3,
- CSTR("10c}"));
- test({.width = 1000, .width_as_arg = false, .type = _Flags::_Type::__char}, 5,
- CSTR("1000c}"));
- test({.width = 1000000, .width_as_arg = false, .type = _Flags::_Type::__char},
- 8, CSTR("1000000c}"));
-
- test({.width = 0, .width_as_arg = true, .type = _Flags::_Type::__char}, 3,
- CSTR("{}c}"));
- test({.width = 0, .width_as_arg = true, .type = _Flags::_Type::__char}, 4,
- CSTR("{0}c}"));
- test({.width = 1, .width_as_arg = true, .type = _Flags::_Type::__char}, 4,
- CSTR("{1}c}"));
-
- // *** Precision ***
- test_exception<Parser<CharT>>(
- "The format-spec should consume the input or end with a '}'", CSTR("."));
- test_exception<Parser<CharT>>(
- "The format-spec should consume the input or end with a '}'", CSTR(".1"));
-
- // *** Locale-specific form ***
- test({.locale_specific_form = true, .type = _Flags::_Type::__char}, 2,
- CSTR("Lc}"));
-}
-
-template <class CharT>
constexpr void test_as_integer() {
test({.alignment = _Flags::_Alignment::__right,
@@ -401,7 +331,6 @@ constexpr void test() {
test({}, 0, CSTR("}"));
test_as_string<CharT>();
- test_as_char<CharT>();
test_as_integer<CharT>();
// *** Type ***
@@ -413,6 +342,7 @@ constexpr void test() {
test_exception<Parser<CharT>>(expected, CSTR("F}"));
test_exception<Parser<CharT>>(expected, CSTR("G}"));
test_exception<Parser<CharT>>(expected, CSTR("a}"));
+ test_exception<Parser<CharT>>(expected, CSTR("c}"));
test_exception<Parser<CharT>>(expected, CSTR("e}"));
test_exception<Parser<CharT>>(expected, CSTR("f}"));
test_exception<Parser<CharT>>(expected, CSTR("g}"));
diff --git a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
index 4b3b8e59cc10..296298cfd31f 100644
--- a/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
+++ b/libcxx/test/std/atomics/atomics.types.generic/trivially_copyable.pass.cpp
@@ -18,8 +18,10 @@
#include <cassert>
#include <chrono> // for nanoseconds
-#ifndef _LIBCPP_HAS_NO_THREADS
-# include <thread> // for thread_id
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include <thread> // for thread_id
#endif
struct TriviallyCopyable {
@@ -35,7 +37,7 @@ void test(T t) {
int main(int, char**) {
test(TriviallyCopyable(42));
test(std::chrono::nanoseconds(2));
-#ifndef _LIBCPP_HAS_NO_THREADS
+#ifndef TEST_HAS_NO_THREADS
test(std::this_thread::get_id());
#endif
diff --git a/libcxx/test/std/atomics/types.pass.cpp b/libcxx/test/std/atomics/types.pass.cpp
index 401f9328b2fa..f95f7a4c1d47 100644
--- a/libcxx/test/std/atomics/types.pass.cpp
+++ b/libcxx/test/std/atomics/types.pass.cpp
@@ -22,12 +22,12 @@
#include <memory>
#include <type_traits>
-#ifndef _LIBCPP_HAS_NO_THREADS
-# include <thread>
-#endif
-
#include "test_macros.h"
+#ifndef TEST_HAS_NO_THREADS
+# include <thread>
+#endif
+
template <class A, bool Integral>
struct test_atomic
{
@@ -167,7 +167,7 @@ int main(int, char**)
test<LargeTriviallyCopyable>();
#endif
-#ifndef _LIBCPP_HAS_NO_THREADS
+#ifndef TEST_HAS_NO_THREADS
test<std::thread::id>();
#endif
test<std::chrono::nanoseconds>();
diff --git a/libcxx/test/std/concepts/concepts.compare/concept.equalitycomparable/equality_comparable_with.compile.pass.cpp b/libcxx/test/std/concepts/concepts.compare/concept.equalitycomparable/equality_comparable_with.compile.pass.cpp
index 1f82b8a0d6b0..df681a7caa23 100644
--- a/libcxx/test/std/concepts/concepts.compare/concept.equalitycomparable/equality_comparable_with.compile.pass.cpp
+++ b/libcxx/test/std/concepts/concepts.compare/concept.equalitycomparable/equality_comparable_with.compile.pass.cpp
@@ -26,8 +26,10 @@
#include <unordered_set>
#include <vector>
-#ifndef _LIBCPP_HAS_NO_THREADS
-# include <mutex>
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include <mutex>
#endif
#include "compare_types.h"
@@ -1058,7 +1060,7 @@ static_assert(
static_assert(
!check_equality_comparable_with<std::list<int>, std::vector<int> >());
-#ifndef _LIBCPP_HAS_NO_THREADS
+#ifndef TEST_HAS_NO_THREADS
static_assert(!check_equality_comparable_with<std::lock_guard<std::mutex>,
std::lock_guard<std::mutex> >());
static_assert(!check_equality_comparable_with<std::lock_guard<std::mutex>,
diff --git a/libcxx/test/std/concepts/concepts.object/movable.compile.pass.cpp b/libcxx/test/std/concepts/concepts.object/movable.compile.pass.cpp
index fff78cfda3f8..1b7b9275ffd9 100644
--- a/libcxx/test/std/concepts/concepts.object/movable.compile.pass.cpp
+++ b/libcxx/test/std/concepts/concepts.object/movable.compile.pass.cpp
@@ -22,8 +22,10 @@
#include <unordered_map>
#include <vector>
-#ifndef _LIBCPP_HAS_NO_THREADS
-# include <mutex>
+#include "test_macros.h"
+
+#ifndef TEST_HAS_NO_THREADS
+# include <mutex>
#endif
#include "type_classification/moveconstructible.h"
diff --git a/libcxx/test/std/language.support/cmp/cmp.concept/three_way_comparable.compile.pass.cpp b/libcxx/test/std/language.support/cmp/cmp.concept/three_way_comparable.compile.pass.cpp
index 1fd2a171d807..b65b77e96ef6 100644
--- a/libcxx/test/std/language.support/cmp/cmp.concept/three_way_comparable.compile.pass.cpp
+++ b/libcxx/test/std/language.support/cmp/cmp.concept/three_way_comparable.compile.pass.cpp
@@ -27,7 +27,7 @@ static_assert(std::three_way_comparable<char const*>);
static_assert(std::three_way_comparable<char volatile*>);
static_assert(std::three_way_comparable<char const volatile*>);
static_assert(std::three_way_comparable<wchar_t&>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(std::three_way_comparable<char8_t const&>);
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/numerics/bit/bit.pow.two/bit_ceil.pass.cpp b/libcxx/test/std/numerics/bit/bit.pow.two/bit_ceil.pass.cpp
index e1dd88e85bf3..f18d52a0ed57 100644
--- a/libcxx/test/std/numerics/bit/bit.pow.two/bit_ceil.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bit.pow.two/bit_ceil.pass.cpp
@@ -95,7 +95,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bit.pow.two/bit_floor.pass.cpp b/libcxx/test/std/numerics/bit/bit.pow.two/bit_floor.pass.cpp
index 045b1cc9a7ad..f89029017fa8 100644
--- a/libcxx/test/std/numerics/bit/bit.pow.two/bit_floor.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bit.pow.two/bit_floor.pass.cpp
@@ -91,7 +91,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bit.pow.two/bit_width.pass.cpp b/libcxx/test/std/numerics/bit/bit.pow.two/bit_width.pass.cpp
index 43f47d36bfe9..84b00efa7ed4 100644
--- a/libcxx/test/std/numerics/bit/bit.pow.two/bit_width.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bit.pow.two/bit_width.pass.cpp
@@ -96,7 +96,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bit.pow.two/has_single_bit.pass.cpp b/libcxx/test/std/numerics/bit/bit.pow.two/has_single_bit.pass.cpp
index 687f9ad45ff1..35343c554240 100644
--- a/libcxx/test/std/numerics/bit/bit.pow.two/has_single_bit.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bit.pow.two/has_single_bit.pass.cpp
@@ -94,7 +94,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.count/countl_one.pass.cpp b/libcxx/test/std/numerics/bit/bitops.count/countl_one.pass.cpp
index 23196aa6ecf3..9ee60a210d82 100644
--- a/libcxx/test/std/numerics/bit/bitops.count/countl_one.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.count/countl_one.pass.cpp
@@ -91,7 +91,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.count/countl_zero.pass.cpp b/libcxx/test/std/numerics/bit/bitops.count/countl_zero.pass.cpp
index 51675808f834..87d5cf2b7be5 100644
--- a/libcxx/test/std/numerics/bit/bitops.count/countl_zero.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.count/countl_zero.pass.cpp
@@ -90,7 +90,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.count/countr_one.pass.cpp b/libcxx/test/std/numerics/bit/bitops.count/countr_one.pass.cpp
index 448fdcbf12fe..e57125e7e701 100644
--- a/libcxx/test/std/numerics/bit/bitops.count/countr_one.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.count/countr_one.pass.cpp
@@ -95,7 +95,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.count/countr_zero.pass.cpp b/libcxx/test/std/numerics/bit/bitops.count/countr_zero.pass.cpp
index ffe5031d7e4d..e8aa7f777acd 100644
--- a/libcxx/test/std/numerics/bit/bitops.count/countr_zero.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.count/countr_zero.pass.cpp
@@ -92,7 +92,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.count/popcount.pass.cpp b/libcxx/test/std/numerics/bit/bitops.count/popcount.pass.cpp
index 3dd994b0868a..3be240fb25a5 100644
--- a/libcxx/test/std/numerics/bit/bitops.count/popcount.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.count/popcount.pass.cpp
@@ -102,7 +102,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.rot/rotl.pass.cpp b/libcxx/test/std/numerics/bit/bitops.rot/rotl.pass.cpp
index 1432aee7fe73..78fac6267d18 100644
--- a/libcxx/test/std/numerics/bit/bitops.rot/rotl.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.rot/rotl.pass.cpp
@@ -90,7 +90,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/numerics/bit/bitops.rot/rotr.pass.cpp b/libcxx/test/std/numerics/bit/bitops.rot/rotr.pass.cpp
index 8c8505a2d588..f930f40c5d18 100644
--- a/libcxx/test/std/numerics/bit/bitops.rot/rotr.pass.cpp
+++ b/libcxx/test/std/numerics/bit/bitops.rot/rotr.pass.cpp
@@ -91,7 +91,7 @@ int main(int, char**)
static_assert(!std::is_invocable_v<L, bool>);
static_assert(!std::is_invocable_v<L, char>);
static_assert(!std::is_invocable_v<L, wchar_t>);
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(!std::is_invocable_v<L, char8_t>);
#endif
static_assert(!std::is_invocable_v<L, char16_t>);
diff --git a/libcxx/test/std/ranges/range.access/begin.pass.cpp b/libcxx/test/std/ranges/range.access/begin.pass.cpp
index 7014681ccabf..03a229470877 100644
--- a/libcxx/test/std/ranges/range.access/begin.pass.cpp
+++ b/libcxx/test/std/ranges/range.access/begin.pass.cpp
@@ -16,6 +16,7 @@
#include <ranges>
#include <cassert>
+#include <utility>
#include "test_macros.h"
#include "test_iterators.h"
@@ -28,6 +29,10 @@ static_assert(!std::is_invocable_v<RangeBeginT, int (&&)[10]>);
static_assert( std::is_invocable_v<RangeBeginT, int (&)[10]>);
static_assert(!std::is_invocable_v<RangeBeginT, int (&&)[]>);
static_assert( std::is_invocable_v<RangeBeginT, int (&)[]>);
+static_assert(!std::is_invocable_v<RangeCBeginT, int (&&)[10]>);
+static_assert( std::is_invocable_v<RangeCBeginT, int (&)[10]>);
+static_assert(!std::is_invocable_v<RangeCBeginT, int (&&)[]>);
+static_assert( std::is_invocable_v<RangeCBeginT, int (&)[]>);
struct Incomplete;
static_assert(!std::is_invocable_v<RangeBeginT, Incomplete(&&)[]>);
@@ -105,12 +110,6 @@ constexpr bool testArray() {
return true;
}
-struct BeginMemberFunction {
- int x;
- constexpr const int *begin() const { return &x; }
- friend int *begin(BeginMemberFunction const&);
-};
-
struct BeginMemberReturnsInt {
int begin() const;
};
@@ -127,12 +126,6 @@ struct EmptyBeginMember {
};
static_assert(!std::is_invocable_v<RangeBeginT, EmptyBeginMember const&>);
-struct EmptyPtrBeginMember {
- struct Empty {};
- Empty x;
- constexpr const Empty *begin() const { return &x; }
-};
-
struct PtrConvertibleBeginMember {
struct iterator { operator int*() const; };
iterator begin() const;
@@ -154,6 +147,18 @@ struct EnabledBorrowingBeginMember {
template<>
inline constexpr bool std::ranges::enable_borrowed_range<EnabledBorrowingBeginMember> = true;
+struct BeginMemberFunction {
+ int x;
+ constexpr const int *begin() const { return &x; }
+ friend int *begin(BeginMemberFunction const&);
+};
+
+struct EmptyPtrBeginMember {
+ struct Empty {};
+ Empty x;
+ constexpr const Empty *begin() const { return &x; }
+};
+
constexpr bool testBeginMember() {
BeginMember a;
assert(std::ranges::begin(a) == &a.x);
@@ -193,24 +198,21 @@ static_assert(!std::is_invocable_v<RangeBeginT, BeginFunction &>);
static_assert( std::is_invocable_v<RangeCBeginT, BeginFunction const&>);
static_assert( std::is_invocable_v<RangeCBeginT, BeginFunction &>);
-struct BeginFunctionWithDataMember {
- int x;
- int begin;
- friend constexpr const int *begin(BeginFunctionWithDataMember const& bf) { return &bf.x; }
+struct BeginFunctionReturnsInt {
+ friend int begin(BeginFunctionReturnsInt const&);
};
+static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsInt const&>);
-struct BeginFunctionWithPrivateBeginMember {
- int y;
- friend constexpr const int *begin(BeginFunctionWithPrivateBeginMember const& bf) { return &bf.y; }
-private:
- const int *begin() const;
+struct BeginFunctionReturnsVoidPtr {
+ friend void *begin(BeginFunctionReturnsVoidPtr const&);
};
+static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsVoidPtr const&>);
-struct BeginFunctionReturnsEmptyPtr {
- struct Empty {};
- Empty x;
- friend constexpr const Empty *begin(BeginFunctionReturnsEmptyPtr const& bf) { return &bf.x; }
+struct BeginFunctionReturnsPtrConvertible {
+ struct iterator { operator int*() const; };
+ friend iterator begin(BeginFunctionReturnsPtrConvertible const&);
};
+static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsPtrConvertible const&>);
struct BeginFunctionByValue {
friend constexpr int *begin(BeginFunctionByValue) { return &globalBuff[1]; }
@@ -223,27 +225,24 @@ struct BeginFunctionEnabledBorrowing {
template<>
inline constexpr bool std::ranges::enable_borrowed_range<BeginFunctionEnabledBorrowing> = true;
-struct BeginFunctionReturnsInt {
- friend int begin(BeginFunctionReturnsInt const&);
-};
-static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsInt const&>);
-
-struct BeginFunctionReturnsVoidPtr {
- friend void *begin(BeginFunctionReturnsVoidPtr const&);
+struct BeginFunctionReturnsEmptyPtr {
+ struct Empty {};
+ Empty x;
+ friend constexpr const Empty *begin(BeginFunctionReturnsEmptyPtr const& bf) { return &bf.x; }
};
-static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsVoidPtr const&>);
-struct BeginFunctionReturnsEmpty {
- struct Empty {};
- friend Empty begin(BeginFunctionReturnsEmpty const&);
+struct BeginFunctionWithDataMember {
+ int x;
+ int begin;
+ friend constexpr const int *begin(BeginFunctionWithDataMember const& bf) { return &bf.x; }
};
-static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsEmpty const&>);
-struct BeginFunctionReturnsPtrConvertible {
- struct iterator { operator int*() const; };
- friend iterator begin(BeginFunctionReturnsPtrConvertible const&);
+struct BeginFunctionWithPrivateBeginMember {
+ int y;
+ friend constexpr const int *begin(BeginFunctionWithPrivateBeginMember const& bf) { return &bf.y; }
+private:
+ const int *begin() const;
};
-static_assert(!std::is_invocable_v<RangeBeginT, BeginFunctionReturnsPtrConvertible const&>);
constexpr bool testBeginFunction() {
BeginFunction a{};
diff --git a/libcxx/test/std/ranges/range.access/end.pass.cpp b/libcxx/test/std/ranges/range.access/end.pass.cpp
index 5f4f05279cdd..ad01b4a16ac7 100644
--- a/libcxx/test/std/ranges/range.access/end.pass.cpp
+++ b/libcxx/test/std/ranges/range.access/end.pass.cpp
@@ -16,6 +16,7 @@
#include <ranges>
#include <cassert>
+#include <utility>
#include "test_macros.h"
#include "test_iterators.h"
@@ -28,6 +29,10 @@ static_assert(!std::is_invocable_v<RangeEndT, int (&&)[]>);
static_assert(!std::is_invocable_v<RangeEndT, int (&)[]>);
static_assert(!std::is_invocable_v<RangeEndT, int (&&)[10]>);
static_assert( std::is_invocable_v<RangeEndT, int (&)[10]>);
+static_assert(!std::is_invocable_v<RangeCEndT, int (&&)[]>);
+static_assert(!std::is_invocable_v<RangeCEndT, int (&)[]>);
+static_assert(!std::is_invocable_v<RangeCEndT, int (&&)[10]>);
+static_assert( std::is_invocable_v<RangeCEndT, int (&)[10]>);
struct Incomplete;
static_assert(!std::is_invocable_v<RangeEndT, Incomplete(&&)[]>);
@@ -91,40 +96,18 @@ constexpr bool testArray() {
return true;
}
-struct EndMemberFunction {
- int x;
- constexpr const int *begin() const { return nullptr; }
- constexpr const int *end() const { return &x; }
- friend constexpr int *end(EndMemberFunction const&);
-};
-
struct EndMemberReturnsInt {
int begin() const;
int end() const;
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndMemberReturnsInt const&>);
struct EndMemberReturnsVoidPtr {
const void *begin() const;
const void *end() const;
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndMemberReturnsVoidPtr const&>);
-struct Empty { };
-struct EmptyEndMember {
- Empty begin() const;
- Empty end() const;
-};
-struct EmptyPtrEndMember {
- Empty x;
- constexpr const Empty *begin() const { return nullptr; }
- constexpr const Empty *end() const { return &x; }
-};
-
-static_assert(!std::is_invocable_v<RangeEndT, EmptyEndMember const&>);
-
struct PtrConvertible {
operator int*() const;
};
@@ -132,13 +115,11 @@ struct PtrConvertibleEndMember {
PtrConvertible begin() const;
PtrConvertible end() const;
};
-
static_assert(!std::is_invocable_v<RangeEndT, PtrConvertibleEndMember const&>);
struct NoBeginMember {
constexpr const int *end();
};
-
static_assert(!std::is_invocable_v<RangeEndT, NoBeginMember const&>);
struct NonConstEndMember {
@@ -146,7 +127,6 @@ struct NonConstEndMember {
constexpr int *begin() { return nullptr; }
constexpr int *end() { return &x; }
};
-
static_assert( std::is_invocable_v<RangeEndT, NonConstEndMember &>);
static_assert(!std::is_invocable_v<RangeEndT, NonConstEndMember const&>);
static_assert(!std::is_invocable_v<RangeCEndT, NonConstEndMember &>);
@@ -160,6 +140,26 @@ struct EnabledBorrowingEndMember {
template<>
inline constexpr bool std::ranges::enable_borrowed_range<EnabledBorrowingEndMember> = true;
+struct EndMemberFunction {
+ int x;
+ constexpr const int *begin() const { return nullptr; }
+ constexpr const int *end() const { return &x; }
+ friend constexpr int *end(EndMemberFunction const&);
+};
+
+struct Empty { };
+struct EmptyEndMember {
+ Empty begin() const;
+ Empty end() const;
+};
+static_assert(!std::is_invocable_v<RangeEndT, EmptyEndMember const&>);
+
+struct EmptyPtrEndMember {
+ Empty x;
+ constexpr const Empty *begin() const { return nullptr; }
+ constexpr const Empty *end() const { return &x; }
+};
+
constexpr bool testEndMember() {
EndMember a;
assert(std::ranges::end(a) == &a.x);
@@ -199,74 +199,69 @@ static_assert(!std::is_invocable_v<RangeEndT, EndFunction &>);
static_assert( std::is_invocable_v<RangeCEndT, EndFunction const&>);
static_assert( std::is_invocable_v<RangeCEndT, EndFunction &>);
-struct EndFunctionWithDataMember {
- int x;
- int end;
- friend constexpr const int *begin(EndFunctionWithDataMember const&) { return nullptr; }
- friend constexpr const int *end(EndFunctionWithDataMember const& bf) { return &bf.x; }
-};
-
-struct EndFunctionWithPrivateEndMember : private EndMember {
- int y;
- friend constexpr const int *begin(EndFunctionWithPrivateEndMember const&) { return nullptr; }
- friend constexpr const int *end(EndFunctionWithPrivateEndMember const& bf) { return &bf.y; }
-};
-
-struct EndFunctionReturnsEmptyPtr {
- Empty x;
- friend constexpr const Empty *begin(EndFunctionReturnsEmptyPtr const&) { return nullptr; }
- friend constexpr const Empty *end(EndFunctionReturnsEmptyPtr const& bf) { return &bf.x; }
-};
-
-struct EndFunctionByValue {
- friend constexpr int *begin(EndFunctionByValue) { return nullptr; }
- friend constexpr int *end(EndFunctionByValue) { return &globalBuff[1]; }
-};
-
-static_assert(!std::is_invocable_v<RangeCEndT, EndFunctionByValue>);
-
-struct EndFunctionEnabledBorrowing {
- friend constexpr int *begin(EndFunctionEnabledBorrowing) { return nullptr; }
- friend constexpr int *end(EndFunctionEnabledBorrowing) { return &globalBuff[2]; }
-};
-
-template<>
-inline constexpr bool std::ranges::enable_borrowed_range<EndFunctionEnabledBorrowing> = true;
-
struct EndFunctionReturnsInt {
friend constexpr int begin(EndFunctionReturnsInt const&);
friend constexpr int end(EndFunctionReturnsInt const&);
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndFunctionReturnsInt const&>);
struct EndFunctionReturnsVoidPtr {
friend constexpr void *begin(EndFunctionReturnsVoidPtr const&);
friend constexpr void *end(EndFunctionReturnsVoidPtr const&);
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndFunctionReturnsVoidPtr const&>);
struct EndFunctionReturnsEmpty {
friend constexpr Empty begin(EndFunctionReturnsEmpty const&);
friend constexpr Empty end(EndFunctionReturnsEmpty const&);
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndFunctionReturnsEmpty const&>);
struct EndFunctionReturnsPtrConvertible {
friend constexpr PtrConvertible begin(EndFunctionReturnsPtrConvertible const&);
friend constexpr PtrConvertible end(EndFunctionReturnsPtrConvertible const&);
};
-
static_assert(!std::is_invocable_v<RangeEndT, EndFunctionReturnsPtrConvertible const&>);
struct NoBeginFunction {
friend constexpr const int *end(NoBeginFunction const&);
};
-
static_assert(!std::is_invocable_v<RangeEndT, NoBeginFunction const&>);
+struct EndFunctionByValue {
+ friend constexpr int *begin(EndFunctionByValue) { return nullptr; }
+ friend constexpr int *end(EndFunctionByValue) { return &globalBuff[1]; }
+};
+static_assert(!std::is_invocable_v<RangeCEndT, EndFunctionByValue>);
+
+struct EndFunctionEnabledBorrowing {
+ friend constexpr int *begin(EndFunctionEnabledBorrowing) { return nullptr; }
+ friend constexpr int *end(EndFunctionEnabledBorrowing) { return &globalBuff[2]; }
+};
+template<>
+inline constexpr bool std::ranges::enable_borrowed_range<EndFunctionEnabledBorrowing> = true;
+
+struct EndFunctionReturnsEmptyPtr {
+ Empty x;
+ friend constexpr const Empty *begin(EndFunctionReturnsEmptyPtr const&) { return nullptr; }
+ friend constexpr const Empty *end(EndFunctionReturnsEmptyPtr const& bf) { return &bf.x; }
+};
+
+struct EndFunctionWithDataMember {
+ int x;
+ int end;
+ friend constexpr const int *begin(EndFunctionWithDataMember const&) { return nullptr; }
+ friend constexpr const int *end(EndFunctionWithDataMember const& bf) { return &bf.x; }
+};
+
+struct EndFunctionWithPrivateEndMember {
+ int y;
+ friend constexpr const int *begin(EndFunctionWithPrivateEndMember const&) { return nullptr; }
+ friend constexpr const int *end(EndFunctionWithPrivateEndMember const& bf) { return &bf.y; }
+private:
+ const int *end() const;
+};
+
struct BeginMemberEndFunction {
int x;
constexpr const int *begin() const { return nullptr; }
diff --git a/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/member_typedefs.compile.pass.cpp b/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/member_typedefs.compile.pass.cpp
index 26d6bafbe5ee..4dcf5d18ebbc 100644
--- a/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/member_typedefs.compile.pass.cpp
+++ b/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/member_typedefs.compile.pass.cpp
@@ -12,8 +12,9 @@
// Test iterator category and iterator concepts.
-#include <ranges>
#include <cassert>
+#include <cstdint>
+#include <ranges>
#include "test_macros.h"
#include "../types.h"
diff --git a/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/minus.pass.cpp b/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/minus.pass.cpp
index f4181801a948..01c8539ec904 100644
--- a/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/minus.pass.cpp
+++ b/libcxx/test/std/ranges/range.factories/range.iota.view/iterator/minus.pass.cpp
@@ -15,8 +15,9 @@
// friend constexpr difference_type operator-(const iterator& x, const iterator& y)
// requires advanceable<W>;
-#include <ranges>
#include <cassert>
+#include <cstdint>
+#include <ranges>
#include "test_macros.h"
#include "../types.h"
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/c_str.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/c_str.pass.cpp
index ee7f4c4718af..13a98fc31461 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/c_str.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/c_str.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s)
{
typedef typename S::traits_type T;
@@ -31,23 +31,32 @@ test(const S& s)
assert(T::eq(str[0], typename S::value_type()));
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""));
test(S("abcde"));
test(S("abcdefghij"));
test(S("abcdefghijklmnopqrst"));
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""));
test(S("abcde"));
test(S("abcdefghij"));
test(S("abcdefghijklmnopqrst"));
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/data.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/data.pass.cpp
index 6aa07302a951..cb28c53982a2 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/data.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/data.pass.cpp
@@ -18,7 +18,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test_const(const S& s)
{
typedef typename S::traits_type T;
@@ -33,7 +33,7 @@ test_const(const S& s)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test_nonconst(S& s)
{
typedef typename S::traits_type T;
@@ -47,32 +47,41 @@ test_nonconst(S& s)
assert(T::eq(str[0], typename S::value_type()));
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test_const(S(""));
test_const(S("abcde"));
test_const(S("abcdefghij"));
test_const(S("abcdefghijklmnopqrst"));
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test_const(S(""));
test_const(S("abcde"));
test_const(S("abcdefghij"));
test_const(S("abcdefghijklmnopqrst"));
- }
+ }
#endif
#if TEST_STD_VER > 14
- {
+ {
typedef std::string S;
S s1(""); test_nonconst(s1);
S s2("abcde"); test_nonconst(s2);
S s3("abcdefghij"); test_nonconst(s3);
S s4("abcdefghijklmnopqrst"); test_nonconst(s4);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/get_allocator.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/get_allocator.pass.cpp
index 652e995e1c47..9adad2a9fe63 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string.accessors/get_allocator.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string.accessors/get_allocator.pass.cpp
@@ -18,31 +18,40 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::allocator_type& a)
{
assert(s.get_allocator() == a);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef test_allocator<char> A;
typedef std::basic_string<char, std::char_traits<char>, A> S;
test(S(""), A());
test(S("abcde", A(1)), A(1));
test(S("abcdefghij", A(2)), A(2));
test(S("abcdefghijklmnopqrst", A(3)), A(3));
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef min_allocator<char> A;
typedef std::basic_string<char, std::char_traits<char>, A> S;
test(S(""), A());
test(S("abcde", A()), A());
test(S("abcdefghij", A()), A());
test(S("abcdefghijklmnopqrst", A()), A());
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/pointer.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/pointer.pass.cpp
index 072f062fca75..316f64bd66f2 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/pointer.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/pointer.pass.cpp
@@ -16,7 +16,7 @@
#include "test_macros.h"
#include "min_allocator.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -26,16 +26,15 @@ int sign(int x)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, int x)
{
LIBCPP_ASSERT_NOEXCEPT(s.compare(str));
assert(sign(s.compare(str)) == sign(x));
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), "", 0);
test(S(""), "abcde", -5);
@@ -53,9 +52,9 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), "abcde", 15);
test(S("abcdefghijklmnopqrst"), "abcdefghij", 10);
test(S("abcdefghijklmnopqrst"), "abcdefghijklmnopqrst", 0);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), "", 0);
test(S(""), "abcde", -5);
@@ -73,7 +72,17 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), "abcde", 15);
test(S("abcdefghijklmnopqrst"), "abcdefghij", 10);
test(S("abcdefghijklmnopqrst"), "abcdefghijklmnopqrst", 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_T_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_T_size_size.pass.cpp
index abab9879a14e..81fc6ba5e5e2 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_T_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_T_size_size.pass.cpp
@@ -80,7 +80,7 @@ test_npos(const S& s, typename S::size_type pos1, typename S::size_type n1,
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 bool test0()
{
test(S(""), 0, 0, SV(""), 0, 0, 0);
test(S(""), 0, 0, SV(""), 0, 1, 0);
@@ -182,10 +182,12 @@ void test0()
test(S(""), 0, 1, SV("abcde"), 5, 0, 0);
test(S(""), 0, 1, SV("abcde"), 5, 1, 0);
test(S(""), 0, 1, SV("abcde"), 6, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 bool test1()
{
test(S(""), 0, 1, SV("abcdefghij"), 0, 0, 0);
test(S(""), 0, 1, SV("abcdefghij"), 0, 1, -1);
@@ -287,10 +289,12 @@ void test1()
test(S(""), 1, 0, SV("abcdefghij"), 11, 0, 0);
test(S(""), 1, 0, SV("abcdefghijklmnopqrst"), 0, 0, 0);
test(S(""), 1, 0, SV("abcdefghijklmnopqrst"), 0, 1, 0);
+
+ return true;
}
template <class S, class SV>
-void test2()
+TEST_CONSTEXPR_CXX20 bool test2()
{
test(S(""), 1, 0, SV("abcdefghijklmnopqrst"), 0, 10, 0);
test(S(""), 1, 0, SV("abcdefghijklmnopqrst"), 0, 19, 0);
@@ -392,10 +396,12 @@ void test2()
test(S("abcde"), 0, 1, SV(""), 0, 1, 1);
test(S("abcde"), 0, 1, SV(""), 1, 0, 0);
test(S("abcde"), 0, 1, SV("abcde"), 0, 0, 1);
+
+ return true;
}
template <class S, class SV>
-void test3()
+TEST_CONSTEXPR_CXX20 bool test3()
{
test(S("abcde"), 0, 1, SV("abcde"), 0, 1, 0);
test(S("abcde"), 0, 1, SV("abcde"), 0, 2, -1);
@@ -497,10 +503,12 @@ void test3()
test(S("abcde"), 0, 2, SV("abcdefghij"), 0, 1, 1);
test(S("abcde"), 0, 2, SV("abcdefghij"), 0, 5, -3);
test(S("abcde"), 0, 2, SV("abcdefghij"), 0, 9, -7);
+
+ return true;
}
template <class S, class SV>
-void test4()
+TEST_CONSTEXPR_CXX20 bool test4()
{
test(S("abcde"), 0, 2, SV("abcdefghij"), 0, 10, -8);
test(S("abcde"), 0, 2, SV("abcdefghij"), 0, 11, -8);
@@ -602,10 +610,12 @@ void test4()
test(S("abcde"), 0, 4, SV("abcdefghijklmnopqrst"), 0, 19, -15);
test(S("abcde"), 0, 4, SV("abcdefghijklmnopqrst"), 0, 20, -16);
test(S("abcde"), 0, 4, SV("abcdefghijklmnopqrst"), 0, 21, -16);
+
+ return true;
}
template <class S, class SV>
-void test5()
+TEST_CONSTEXPR_CXX20 bool test5()
{
test(S("abcde"), 0, 4, SV("abcdefghijklmnopqrst"), 1, 0, 4);
test(S("abcde"), 0, 4, SV("abcdefghijklmnopqrst"), 1, 1, -1);
@@ -707,10 +717,12 @@ void test5()
test(S("abcde"), 0, 6, SV("abcde"), 0, 2, 3);
test(S("abcde"), 0, 6, SV("abcde"), 0, 4, 1);
test(S("abcde"), 0, 6, SV("abcde"), 0, 5, 0);
+
+ return true;
}
template <class S, class SV>
-void test6()
+TEST_CONSTEXPR_CXX20 bool test6()
{
test(S("abcde"), 0, 6, SV("abcde"), 0, 6, 0);
test(S("abcde"), 0, 6, SV("abcde"), 1, 0, 5);
@@ -812,10 +824,12 @@ void test6()
test(S("abcde"), 1, 0, SV("abcdefghij"), 0, 11, -10);
test(S("abcde"), 1, 0, SV("abcdefghij"), 1, 0, 0);
test(S("abcde"), 1, 0, SV("abcdefghij"), 1, 1, -1);
+
+ return true;
}
template <class S, class SV>
-void test7()
+TEST_CONSTEXPR_CXX20 bool test7()
{
test(S("abcde"), 1, 0, SV("abcdefghij"), 1, 4, -4);
test(S("abcde"), 1, 0, SV("abcdefghij"), 1, 8, -8);
@@ -917,10 +931,12 @@ void test7()
test(S("abcde"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 1, 0);
test(S("abcde"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 9, -8);
test(S("abcde"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 18, -17);
+
+ return true;
}
template <class S, class SV>
-void test8()
+TEST_CONSTEXPR_CXX20 bool test8()
{
test(S("abcde"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 19, -18);
test(S("abcde"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 20, -18);
@@ -1022,10 +1038,12 @@ void test8()
test(S("abcde"), 1, 3, SV("abcde"), 1, 0, 3);
test(S("abcde"), 1, 3, SV("abcde"), 1, 1, 2);
test(S("abcde"), 1, 3, SV("abcde"), 1, 2, 1);
+
+ return true;
}
template <class S, class SV>
-void test9()
+TEST_CONSTEXPR_CXX20 bool test9()
{
test(S("abcde"), 1, 3, SV("abcde"), 1, 3, 0);
test(S("abcde"), 1, 3, SV("abcde"), 1, 4, -1);
@@ -1127,10 +1145,12 @@ void test9()
test(S("abcde"), 1, 4, SV("abcdefghij"), 1, 8, -4);
test(S("abcde"), 1, 4, SV("abcdefghij"), 1, 9, -5);
test(S("abcde"), 1, 4, SV("abcdefghij"), 1, 10, -5);
+
+ return true;
}
template <class S, class SV>
-void test10()
+TEST_CONSTEXPR_CXX20 bool test10()
{
test(S("abcde"), 1, 4, SV("abcdefghij"), 5, 0, 4);
test(S("abcde"), 1, 4, SV("abcdefghij"), 5, 1, -4);
@@ -1232,10 +1252,12 @@ void test10()
test(S("abcde"), 1, 5, SV("abcdefghijklmnopqrst"), 1, 20, -15);
test(S("abcde"), 1, 5, SV("abcdefghijklmnopqrst"), 10, 0, 4);
test(S("abcde"), 1, 5, SV("abcdefghijklmnopqrst"), 10, 1, -9);
+
+ return true;
}
template <class S, class SV>
-void test11()
+TEST_CONSTEXPR_CXX20 bool test11()
{
test(S("abcde"), 1, 5, SV("abcdefghijklmnopqrst"), 10, 5, -9);
test(S("abcde"), 1, 5, SV("abcdefghijklmnopqrst"), 10, 9, -9);
@@ -1337,10 +1359,12 @@ void test11()
test(S("abcde"), 2, 1, SV("abcde"), 1, 4, 1);
test(S("abcde"), 2, 1, SV("abcde"), 1, 5, 1);
test(S("abcde"), 2, 1, SV("abcde"), 2, 0, 1);
+
+ return true;
}
template <class S, class SV>
-void test12()
+TEST_CONSTEXPR_CXX20 bool test12()
{
test(S("abcde"), 2, 1, SV("abcde"), 2, 1, 0);
test(S("abcde"), 2, 1, SV("abcde"), 2, 2, -1);
@@ -1442,10 +1466,12 @@ void test12()
test(S("abcde"), 2, 2, SV("abcdefghij"), 5, 1, -3);
test(S("abcde"), 2, 2, SV("abcdefghij"), 5, 2, -3);
test(S("abcde"), 2, 2, SV("abcdefghij"), 5, 4, -3);
+
+ return true;
}
template <class S, class SV>
-void test13()
+TEST_CONSTEXPR_CXX20 bool test13()
{
test(S("abcde"), 2, 2, SV("abcdefghij"), 5, 5, -3);
test(S("abcde"), 2, 2, SV("abcdefghij"), 5, 6, -3);
@@ -1547,10 +1573,12 @@ void test13()
test(S("abcde"), 2, 3, SV("abcdefghijklmnopqrst"), 10, 9, -8);
test(S("abcde"), 2, 3, SV("abcdefghijklmnopqrst"), 10, 10, -8);
test(S("abcde"), 2, 3, SV("abcdefghijklmnopqrst"), 10, 11, -8);
+
+ return true;
}
template <class S, class SV>
-void test14()
+TEST_CONSTEXPR_CXX20 bool test14()
{
test(S("abcde"), 2, 3, SV("abcdefghijklmnopqrst"), 19, 0, 3);
test(S("abcde"), 2, 3, SV("abcdefghijklmnopqrst"), 19, 1, -17);
@@ -1652,10 +1680,12 @@ void test14()
test(S("abcde"), 4, 0, SV("abcde"), 2, 2, -2);
test(S("abcde"), 4, 0, SV("abcde"), 2, 3, -3);
test(S("abcde"), 4, 0, SV("abcde"), 2, 4, -3);
+
+ return true;
}
template <class S, class SV>
-void test15()
+TEST_CONSTEXPR_CXX20 bool test15()
{
test(S("abcde"), 4, 0, SV("abcde"), 4, 0, 0);
test(S("abcde"), 4, 0, SV("abcde"), 4, 1, -1);
@@ -1757,10 +1787,12 @@ void test15()
test(S("abcde"), 4, 1, SV("abcdefghij"), 5, 6, -1);
test(S("abcde"), 4, 1, SV("abcdefghij"), 9, 0, 1);
test(S("abcde"), 4, 1, SV("abcdefghij"), 9, 1, -5);
+
+ return true;
}
template <class S, class SV>
-void test16()
+TEST_CONSTEXPR_CXX20 bool test16()
{
test(S("abcde"), 4, 1, SV("abcdefghij"), 9, 2, -5);
test(S("abcde"), 4, 1, SV("abcdefghij"), 10, 0, 1);
@@ -1862,10 +1894,12 @@ void test16()
test(S("abcde"), 4, 2, SV("abcdefghijklmnopqrst"), 19, 1, -15);
test(S("abcde"), 4, 2, SV("abcdefghijklmnopqrst"), 19, 2, -15);
test(S("abcde"), 4, 2, SV("abcdefghijklmnopqrst"), 20, 0, 1);
+
+ return true;
}
template <class S, class SV>
-void test17()
+TEST_CONSTEXPR_CXX20 bool test17()
{
test(S("abcde"), 4, 2, SV("abcdefghijklmnopqrst"), 20, 1, 1);
test(S("abcde"), 4, 2, SV("abcdefghijklmnopqrst"), 21, 0, 0);
@@ -1967,10 +2001,12 @@ void test17()
test(S("abcde"), 5, 1, SV("abcde"), 4, 1, -1);
test(S("abcde"), 5, 1, SV("abcde"), 4, 2, -1);
test(S("abcde"), 5, 1, SV("abcde"), 5, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test18()
+TEST_CONSTEXPR_CXX20 bool test18()
{
test(S("abcde"), 5, 1, SV("abcde"), 5, 1, 0);
test(S("abcde"), 5, 1, SV("abcde"), 6, 0, 0);
@@ -2072,10 +2108,12 @@ void test18()
test(S("abcde"), 6, 0, SV("abcdefghij"), 10, 0, 0);
test(S("abcde"), 6, 0, SV("abcdefghij"), 10, 1, 0);
test(S("abcde"), 6, 0, SV("abcdefghij"), 11, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test19()
+TEST_CONSTEXPR_CXX20 bool test19()
{
test(S("abcde"), 6, 0, SV("abcdefghijklmnopqrst"), 0, 0, 0);
test(S("abcde"), 6, 0, SV("abcdefghijklmnopqrst"), 0, 1, 0);
@@ -2177,10 +2215,12 @@ void test19()
test(S("abcdefghij"), 0, 0, SV("abcdefghijklmnopqrst"), 21, 0, 0);
test(S("abcdefghij"), 0, 1, SV(""), 0, 0, 1);
test(S("abcdefghij"), 0, 1, SV(""), 0, 1, 1);
+
+ return true;
}
template <class S, class SV>
-void test20()
+TEST_CONSTEXPR_CXX20 bool test20()
{
test(S("abcdefghij"), 0, 1, SV(""), 1, 0, 0);
test(S("abcdefghij"), 0, 1, SV("abcde"), 0, 0, 1);
@@ -2282,10 +2322,12 @@ void test20()
test(S("abcdefghij"), 0, 5, SV("abcde"), 6, 0, 0);
test(S("abcdefghij"), 0, 5, SV("abcdefghij"), 0, 0, 5);
test(S("abcdefghij"), 0, 5, SV("abcdefghij"), 0, 1, 4);
+
+ return true;
}
template <class S, class SV>
-void test21()
+TEST_CONSTEXPR_CXX20 bool test21()
{
test(S("abcdefghij"), 0, 5, SV("abcdefghij"), 0, 5, 0);
test(S("abcdefghij"), 0, 5, SV("abcdefghij"), 0, 9, -4);
@@ -2387,10 +2429,12 @@ void test21()
test(S("abcdefghij"), 0, 9, SV("abcdefghijklmnopqrst"), 0, 1, 8);
test(S("abcdefghij"), 0, 9, SV("abcdefghijklmnopqrst"), 0, 10, -1);
test(S("abcdefghij"), 0, 9, SV("abcdefghijklmnopqrst"), 0, 19, -10);
+
+ return true;
}
template <class S, class SV>
-void test22()
+TEST_CONSTEXPR_CXX20 bool test22()
{
test(S("abcdefghij"), 0, 9, SV("abcdefghijklmnopqrst"), 0, 20, -11);
test(S("abcdefghij"), 0, 9, SV("abcdefghijklmnopqrst"), 0, 21, -11);
@@ -2492,10 +2536,12 @@ void test22()
test(S("abcdefghij"), 0, 11, SV("abcde"), 0, 0, 10);
test(S("abcdefghij"), 0, 11, SV("abcde"), 0, 1, 9);
test(S("abcdefghij"), 0, 11, SV("abcde"), 0, 2, 8);
+
+ return true;
}
template <class S, class SV>
-void test23()
+TEST_CONSTEXPR_CXX20 bool test23()
{
test(S("abcdefghij"), 0, 11, SV("abcde"), 0, 4, 6);
test(S("abcdefghij"), 0, 11, SV("abcde"), 0, 5, 5);
@@ -2597,10 +2643,12 @@ void test23()
test(S("abcdefghij"), 1, 0, SV("abcdefghij"), 0, 9, -9);
test(S("abcdefghij"), 1, 0, SV("abcdefghij"), 0, 10, -10);
test(S("abcdefghij"), 1, 0, SV("abcdefghij"), 0, 11, -10);
+
+ return true;
}
template <class S, class SV>
-void test24()
+TEST_CONSTEXPR_CXX20 bool test24()
{
test(S("abcdefghij"), 1, 0, SV("abcdefghij"), 1, 0, 0);
test(S("abcdefghij"), 1, 0, SV("abcdefghij"), 1, 1, -1);
@@ -2702,10 +2750,12 @@ void test24()
test(S("abcdefghij"), 1, 1, SV("abcdefghijklmnopqrst"), 0, 21, 1);
test(S("abcdefghij"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 0, 1);
test(S("abcdefghij"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 1, 0);
+
+ return true;
}
template <class S, class SV>
-void test25()
+TEST_CONSTEXPR_CXX20 bool test25()
{
test(S("abcdefghij"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 9, -8);
test(S("abcdefghij"), 1, 1, SV("abcdefghijklmnopqrst"), 1, 18, -17);
@@ -2807,10 +2857,12 @@ void test25()
test(S("abcdefghij"), 1, 8, SV("abcde"), 0, 5, 1);
test(S("abcdefghij"), 1, 8, SV("abcde"), 0, 6, 1);
test(S("abcdefghij"), 1, 8, SV("abcde"), 1, 0, 8);
+
+ return true;
}
template <class S, class SV>
-void test26()
+TEST_CONSTEXPR_CXX20 bool test26()
{
test(S("abcdefghij"), 1, 8, SV("abcde"), 1, 1, 7);
test(S("abcdefghij"), 1, 8, SV("abcde"), 1, 2, 6);
@@ -2912,10 +2964,12 @@ void test26()
test(S("abcdefghij"), 1, 9, SV("abcdefghij"), 1, 1, 8);
test(S("abcdefghij"), 1, 9, SV("abcdefghij"), 1, 4, 5);
test(S("abcdefghij"), 1, 9, SV("abcdefghij"), 1, 8, 1);
+
+ return true;
}
template <class S, class SV>
-void test27()
+TEST_CONSTEXPR_CXX20 bool test27()
{
test(S("abcdefghij"), 1, 9, SV("abcdefghij"), 1, 9, 0);
test(S("abcdefghij"), 1, 9, SV("abcdefghij"), 1, 10, 0);
@@ -3017,10 +3071,12 @@ void test27()
test(S("abcdefghij"), 1, 10, SV("abcdefghijklmnopqrst"), 1, 18, -9);
test(S("abcdefghij"), 1, 10, SV("abcdefghijklmnopqrst"), 1, 19, -10);
test(S("abcdefghij"), 1, 10, SV("abcdefghijklmnopqrst"), 1, 20, -10);
+
+ return true;
}
template <class S, class SV>
-void test28()
+TEST_CONSTEXPR_CXX20 bool test28()
{
test(S("abcdefghij"), 1, 10, SV("abcdefghijklmnopqrst"), 10, 0, 9);
test(S("abcdefghij"), 1, 10, SV("abcdefghijklmnopqrst"), 10, 1, -9);
@@ -3122,10 +3178,12 @@ void test28()
test(S("abcdefghij"), 5, 1, SV("abcde"), 1, 2, 4);
test(S("abcdefghij"), 5, 1, SV("abcde"), 1, 3, 4);
test(S("abcdefghij"), 5, 1, SV("abcde"), 1, 4, 4);
+
+ return true;
}
template <class S, class SV>
-void test29()
+TEST_CONSTEXPR_CXX20 bool test29()
{
test(S("abcdefghij"), 5, 1, SV("abcde"), 1, 5, 4);
test(S("abcdefghij"), 5, 1, SV("abcde"), 2, 0, 1);
@@ -3227,10 +3285,12 @@ void test29()
test(S("abcdefghij"), 5, 2, SV("abcdefghij"), 1, 10, 4);
test(S("abcdefghij"), 5, 2, SV("abcdefghij"), 5, 0, 2);
test(S("abcdefghij"), 5, 2, SV("abcdefghij"), 5, 1, 1);
+
+ return true;
}
template <class S, class SV>
-void test30()
+TEST_CONSTEXPR_CXX20 bool test30()
{
test(S("abcdefghij"), 5, 2, SV("abcdefghij"), 5, 2, 0);
test(S("abcdefghij"), 5, 2, SV("abcdefghij"), 5, 4, -2);
@@ -3332,10 +3392,12 @@ void test30()
test(S("abcdefghij"), 5, 4, SV("abcdefghijklmnopqrst"), 10, 1, -5);
test(S("abcdefghij"), 5, 4, SV("abcdefghijklmnopqrst"), 10, 5, -5);
test(S("abcdefghij"), 5, 4, SV("abcdefghijklmnopqrst"), 10, 9, -5);
+
+ return true;
}
template <class S, class SV>
-void test31()
+TEST_CONSTEXPR_CXX20 bool test31()
{
test(S("abcdefghij"), 5, 4, SV("abcdefghijklmnopqrst"), 10, 10, -5);
test(S("abcdefghij"), 5, 4, SV("abcdefghijklmnopqrst"), 10, 11, -5);
@@ -3437,10 +3499,12 @@ void test31()
test(S("abcdefghij"), 5, 6, SV("abcde"), 2, 0, 5);
test(S("abcdefghij"), 5, 6, SV("abcde"), 2, 1, 3);
test(S("abcdefghij"), 5, 6, SV("abcde"), 2, 2, 3);
+
+ return true;
}
template <class S, class SV>
-void test32()
+TEST_CONSTEXPR_CXX20 bool test32()
{
test(S("abcdefghij"), 5, 6, SV("abcde"), 2, 3, 3);
test(S("abcdefghij"), 5, 6, SV("abcde"), 2, 4, 3);
@@ -3542,10 +3606,12 @@ void test32()
test(S("abcdefghij"), 9, 0, SV("abcdefghij"), 5, 4, -4);
test(S("abcdefghij"), 9, 0, SV("abcdefghij"), 5, 5, -5);
test(S("abcdefghij"), 9, 0, SV("abcdefghij"), 5, 6, -5);
+
+ return true;
}
template <class S, class SV>
-void test33()
+TEST_CONSTEXPR_CXX20 bool test33()
{
test(S("abcdefghij"), 9, 0, SV("abcdefghij"), 9, 0, 0);
test(S("abcdefghij"), 9, 0, SV("abcdefghij"), 9, 1, -1);
@@ -3647,10 +3713,12 @@ void test33()
test(S("abcdefghij"), 9, 1, SV("abcdefghijklmnopqrst"), 10, 11, -1);
test(S("abcdefghij"), 9, 1, SV("abcdefghijklmnopqrst"), 19, 0, 1);
test(S("abcdefghij"), 9, 1, SV("abcdefghijklmnopqrst"), 19, 1, -10);
+
+ return true;
}
template <class S, class SV>
-void test34()
+TEST_CONSTEXPR_CXX20 bool test34()
{
test(S("abcdefghij"), 9, 1, SV("abcdefghijklmnopqrst"), 19, 2, -10);
test(S("abcdefghij"), 9, 1, SV("abcdefghijklmnopqrst"), 20, 0, 1);
@@ -3752,10 +3820,12 @@ void test34()
test(S("abcdefghij"), 10, 0, SV("abcde"), 2, 4, -3);
test(S("abcdefghij"), 10, 0, SV("abcde"), 4, 0, 0);
test(S("abcdefghij"), 10, 0, SV("abcde"), 4, 1, -1);
+
+ return true;
}
template <class S, class SV>
-void test35()
+TEST_CONSTEXPR_CXX20 bool test35()
{
test(S("abcdefghij"), 10, 0, SV("abcde"), 4, 2, -1);
test(S("abcdefghij"), 10, 0, SV("abcde"), 5, 0, 0);
@@ -3857,10 +3927,12 @@ void test35()
test(S("abcdefghij"), 10, 1, SV("abcdefghij"), 9, 1, -1);
test(S("abcdefghij"), 10, 1, SV("abcdefghij"), 9, 2, -1);
test(S("abcdefghij"), 10, 1, SV("abcdefghij"), 10, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test36()
+TEST_CONSTEXPR_CXX20 bool test36()
{
test(S("abcdefghij"), 10, 1, SV("abcdefghij"), 10, 1, 0);
test(S("abcdefghij"), 10, 1, SV("abcdefghij"), 11, 0, 0);
@@ -3962,10 +4034,12 @@ void test36()
test(S("abcdefghij"), 11, 0, SV("abcdefghijklmnopqrst"), 20, 0, 0);
test(S("abcdefghij"), 11, 0, SV("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghij"), 11, 0, SV("abcdefghijklmnopqrst"), 21, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test37()
+TEST_CONSTEXPR_CXX20 bool test37()
{
test(S("abcdefghijklmnopqrst"), 0, 0, SV(""), 0, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 0, SV(""), 0, 1, 0);
@@ -4067,10 +4141,12 @@ void test37()
test(S("abcdefghijklmnopqrst"), 0, 1, SV("abcde"), 5, 0, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, SV("abcde"), 5, 1, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, SV("abcde"), 6, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test38()
+TEST_CONSTEXPR_CXX20 bool test38()
{
test(S("abcdefghijklmnopqrst"), 0, 1, SV("abcdefghij"), 0, 0, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, SV("abcdefghij"), 0, 1, 0);
@@ -4172,10 +4248,12 @@ void test38()
test(S("abcdefghijklmnopqrst"), 0, 10, SV("abcdefghij"), 11, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 10, SV("abcdefghijklmnopqrst"), 0, 0, 10);
test(S("abcdefghijklmnopqrst"), 0, 10, SV("abcdefghijklmnopqrst"), 0, 1, 9);
+
+ return true;
}
template <class S, class SV>
-void test39()
+TEST_CONSTEXPR_CXX20 bool test39()
{
test(S("abcdefghijklmnopqrst"), 0, 10, SV("abcdefghijklmnopqrst"), 0, 10, 0);
test(S("abcdefghijklmnopqrst"), 0, 10, SV("abcdefghijklmnopqrst"), 0, 19, -9);
@@ -4277,10 +4355,12 @@ void test39()
test(S("abcdefghijklmnopqrst"), 0, 20, SV(""), 0, 1, 20);
test(S("abcdefghijklmnopqrst"), 0, 20, SV(""), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 20, SV("abcde"), 0, 0, 20);
+
+ return true;
}
template <class S, class SV>
-void test40()
+TEST_CONSTEXPR_CXX20 bool test40()
{
test(S("abcdefghijklmnopqrst"), 0, 20, SV("abcde"), 0, 1, 19);
test(S("abcdefghijklmnopqrst"), 0, 20, SV("abcde"), 0, 2, 18);
@@ -4382,10 +4462,12 @@ void test40()
test(S("abcdefghijklmnopqrst"), 0, 21, SV("abcdefghij"), 0, 1, 19);
test(S("abcdefghijklmnopqrst"), 0, 21, SV("abcdefghij"), 0, 5, 15);
test(S("abcdefghijklmnopqrst"), 0, 21, SV("abcdefghij"), 0, 9, 11);
+
+ return true;
}
template <class S, class SV>
-void test41()
+TEST_CONSTEXPR_CXX20 bool test41()
{
test(S("abcdefghijklmnopqrst"), 0, 21, SV("abcdefghij"), 0, 10, 10);
test(S("abcdefghijklmnopqrst"), 0, 21, SV("abcdefghij"), 0, 11, 10);
@@ -4487,10 +4569,12 @@ void test41()
test(S("abcdefghijklmnopqrst"), 1, 0, SV("abcdefghijklmnopqrst"), 0, 19, -19);
test(S("abcdefghijklmnopqrst"), 1, 0, SV("abcdefghijklmnopqrst"), 0, 20, -20);
test(S("abcdefghijklmnopqrst"), 1, 0, SV("abcdefghijklmnopqrst"), 0, 21, -20);
+
+ return true;
}
template <class S, class SV>
-void test42()
+TEST_CONSTEXPR_CXX20 bool test42()
{
test(S("abcdefghijklmnopqrst"), 1, 0, SV("abcdefghijklmnopqrst"), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 1, 0, SV("abcdefghijklmnopqrst"), 1, 1, -1);
@@ -4592,10 +4676,12 @@ void test42()
test(S("abcdefghijklmnopqrst"), 1, 9, SV("abcde"), 0, 2, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, SV("abcde"), 0, 4, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, SV("abcde"), 0, 5, 1);
+
+ return true;
}
template <class S, class SV>
-void test43()
+TEST_CONSTEXPR_CXX20 bool test43()
{
test(S("abcdefghijklmnopqrst"), 1, 9, SV("abcde"), 0, 6, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, SV("abcde"), 1, 0, 9);
@@ -4697,10 +4783,12 @@ void test43()
test(S("abcdefghijklmnopqrst"), 1, 18, SV("abcdefghij"), 0, 11, 1);
test(S("abcdefghijklmnopqrst"), 1, 18, SV("abcdefghij"), 1, 0, 18);
test(S("abcdefghijklmnopqrst"), 1, 18, SV("abcdefghij"), 1, 1, 17);
+
+ return true;
}
template <class S, class SV>
-void test44()
+TEST_CONSTEXPR_CXX20 bool test44()
{
test(S("abcdefghijklmnopqrst"), 1, 18, SV("abcdefghij"), 1, 4, 14);
test(S("abcdefghijklmnopqrst"), 1, 18, SV("abcdefghij"), 1, 8, 10);
@@ -4802,10 +4890,12 @@ void test44()
test(S("abcdefghijklmnopqrst"), 1, 19, SV("abcdefghijklmnopqrst"), 1, 1, 18);
test(S("abcdefghijklmnopqrst"), 1, 19, SV("abcdefghijklmnopqrst"), 1, 9, 10);
test(S("abcdefghijklmnopqrst"), 1, 19, SV("abcdefghijklmnopqrst"), 1, 18, 1);
+
+ return true;
}
template <class S, class SV>
-void test45()
+TEST_CONSTEXPR_CXX20 bool test45()
{
test(S("abcdefghijklmnopqrst"), 1, 19, SV("abcdefghijklmnopqrst"), 1, 19, 0);
test(S("abcdefghijklmnopqrst"), 1, 19, SV("abcdefghijklmnopqrst"), 1, 20, 0);
@@ -4907,10 +4997,12 @@ void test45()
test(S("abcdefghijklmnopqrst"), 10, 0, SV("abcde"), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 10, 0, SV("abcde"), 1, 1, -1);
test(S("abcdefghijklmnopqrst"), 10, 0, SV("abcde"), 1, 2, -2);
+
+ return true;
}
template <class S, class SV>
-void test46()
+TEST_CONSTEXPR_CXX20 bool test46()
{
test(S("abcdefghijklmnopqrst"), 10, 0, SV("abcde"), 1, 3, -3);
test(S("abcdefghijklmnopqrst"), 10, 0, SV("abcde"), 1, 4, -4);
@@ -5012,10 +5104,12 @@ void test46()
test(S("abcdefghijklmnopqrst"), 10, 1, SV("abcdefghij"), 1, 8, 9);
test(S("abcdefghijklmnopqrst"), 10, 1, SV("abcdefghij"), 1, 9, 9);
test(S("abcdefghijklmnopqrst"), 10, 1, SV("abcdefghij"), 1, 10, 9);
+
+ return true;
}
template <class S, class SV>
-void test47()
+TEST_CONSTEXPR_CXX20 bool test47()
{
test(S("abcdefghijklmnopqrst"), 10, 1, SV("abcdefghij"), 5, 0, 1);
test(S("abcdefghijklmnopqrst"), 10, 1, SV("abcdefghij"), 5, 1, 5);
@@ -5117,10 +5211,12 @@ void test47()
test(S("abcdefghijklmnopqrst"), 10, 5, SV("abcdefghijklmnopqrst"), 1, 20, 9);
test(S("abcdefghijklmnopqrst"), 10, 5, SV("abcdefghijklmnopqrst"), 10, 0, 5);
test(S("abcdefghijklmnopqrst"), 10, 5, SV("abcdefghijklmnopqrst"), 10, 1, 4);
+
+ return true;
}
template <class S, class SV>
-void test48()
+TEST_CONSTEXPR_CXX20 bool test48()
{
test(S("abcdefghijklmnopqrst"), 10, 5, SV("abcdefghijklmnopqrst"), 10, 5, 0);
test(S("abcdefghijklmnopqrst"), 10, 5, SV("abcdefghijklmnopqrst"), 10, 9, -4);
@@ -5222,10 +5318,12 @@ void test48()
test(S("abcdefghijklmnopqrst"), 10, 10, SV("abcde"), 1, 4, 9);
test(S("abcdefghijklmnopqrst"), 10, 10, SV("abcde"), 1, 5, 9);
test(S("abcdefghijklmnopqrst"), 10, 10, SV("abcde"), 2, 0, 10);
+
+ return true;
}
template <class S, class SV>
-void test49()
+TEST_CONSTEXPR_CXX20 bool test49()
{
test(S("abcdefghijklmnopqrst"), 10, 10, SV("abcde"), 2, 1, 8);
test(S("abcdefghijklmnopqrst"), 10, 10, SV("abcde"), 2, 2, 8);
@@ -5327,10 +5425,12 @@ void test49()
test(S("abcdefghijklmnopqrst"), 10, 11, SV("abcdefghij"), 5, 1, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, SV("abcdefghij"), 5, 2, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, SV("abcdefghij"), 5, 4, 5);
+
+ return true;
}
template <class S, class SV>
-void test50()
+TEST_CONSTEXPR_CXX20 bool test50()
{
test(S("abcdefghijklmnopqrst"), 10, 11, SV("abcdefghij"), 5, 5, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, SV("abcdefghij"), 5, 6, 5);
@@ -5432,10 +5532,12 @@ void test50()
test(S("abcdefghijklmnopqrst"), 19, 0, SV("abcdefghijklmnopqrst"), 10, 9, -9);
test(S("abcdefghijklmnopqrst"), 19, 0, SV("abcdefghijklmnopqrst"), 10, 10, -10);
test(S("abcdefghijklmnopqrst"), 19, 0, SV("abcdefghijklmnopqrst"), 10, 11, -10);
+
+ return true;
}
template <class S, class SV>
-void test51()
+TEST_CONSTEXPR_CXX20 bool test51()
{
test(S("abcdefghijklmnopqrst"), 19, 0, SV("abcdefghijklmnopqrst"), 19, 0, 0);
test(S("abcdefghijklmnopqrst"), 19, 0, SV("abcdefghijklmnopqrst"), 19, 1, -1);
@@ -5537,10 +5639,12 @@ void test51()
test(S("abcdefghijklmnopqrst"), 19, 2, SV("abcde"), 2, 2, 17);
test(S("abcdefghijklmnopqrst"), 19, 2, SV("abcde"), 2, 3, 17);
test(S("abcdefghijklmnopqrst"), 19, 2, SV("abcde"), 2, 4, 17);
+
+ return true;
}
template <class S, class SV>
-void test52()
+TEST_CONSTEXPR_CXX20 bool test52()
{
test(S("abcdefghijklmnopqrst"), 19, 2, SV("abcde"), 4, 0, 1);
test(S("abcdefghijklmnopqrst"), 19, 2, SV("abcde"), 4, 1, 15);
@@ -5642,10 +5746,12 @@ void test52()
test(S("abcdefghijklmnopqrst"), 20, 0, SV("abcdefghij"), 5, 6, -5);
test(S("abcdefghijklmnopqrst"), 20, 0, SV("abcdefghij"), 9, 0, 0);
test(S("abcdefghijklmnopqrst"), 20, 0, SV("abcdefghij"), 9, 1, -1);
+
+ return true;
}
template <class S, class SV>
-void test53()
+TEST_CONSTEXPR_CXX20 bool test53()
{
test(S("abcdefghijklmnopqrst"), 20, 0, SV("abcdefghij"), 9, 2, -1);
test(S("abcdefghijklmnopqrst"), 20, 0, SV("abcdefghij"), 10, 0, 0);
@@ -5747,10 +5853,12 @@ void test53()
test(S("abcdefghijklmnopqrst"), 20, 1, SV("abcdefghijklmnopqrst"), 19, 1, -1);
test(S("abcdefghijklmnopqrst"), 20, 1, SV("abcdefghijklmnopqrst"), 19, 2, -1);
test(S("abcdefghijklmnopqrst"), 20, 1, SV("abcdefghijklmnopqrst"), 20, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test54()
+TEST_CONSTEXPR_CXX20 bool test54()
{
test(S("abcdefghijklmnopqrst"), 20, 1, SV("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghijklmnopqrst"), 20, 1, SV("abcdefghijklmnopqrst"), 21, 0, 0);
@@ -5828,16 +5936,20 @@ void test54()
test(S("abcdefghijklmnopqrst"), 21, 0, SV("abcdefghijklmnopqrst"), 20, 0, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, SV("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, SV("abcdefghijklmnopqrst"), 21, 0, 0);
+
+ return true;
}
template <class S, class SV>
-void test55()
+TEST_CONSTEXPR_CXX20 bool test55()
{
test_npos(S(""), 0, 0, SV(""), 0, 0);
test_npos(S(""), 0, 0, SV("abcde"), 0, -5);
test_npos(S("abcde"), 0, 0, SV("abcdefghij"), 0, -10);
test_npos(S("abcde"), 0, 0, SV("abcdefghij"), 1, -9);
test_npos(S("abcde"), 0, 0, SV("abcdefghij"), 5, -5);
+
+ return true;
}
int main(int, char**)
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer.pass.cpp
index 4e77fc09c8e4..c5720f4b85ae 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer.pass.cpp
@@ -18,7 +18,7 @@
#include "test_macros.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -28,7 +28,7 @@ int sign(int x)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::size_type pos1, typename S::size_type n1,
const typename S::value_type* str, int x)
{
@@ -361,21 +361,30 @@ void test2()
test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghijklmnopqrst", 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer_size.pass.cpp
index 75efbadcc811..7c1512af339b 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_pointer_size.pass.cpp
@@ -18,7 +18,7 @@
#include "test_macros.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -28,7 +28,7 @@ int sign(int x)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::size_type pos, typename S::size_type n1,
const typename S::value_type* str, typename S::size_type n2, int x)
{
@@ -51,7 +51,7 @@ test(const S& s, typename S::size_type pos, typename S::size_type n1,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 bool test0()
{
test(S(""), 0, 0, "", 0, 0);
test(S(""), 0, 0, "abcde", 0, 0);
@@ -153,10 +153,12 @@ void test0()
test(S("abcde"), 0, 4, "abcde", 0, 4);
test(S("abcde"), 0, 4, "abcde", 1, 3);
test(S("abcde"), 0, 4, "abcde", 2, 2);
+
+ return true;
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 bool test1()
{
test(S("abcde"), 0, 4, "abcde", 4, 0);
test(S("abcde"), 0, 4, "abcde", 5, -1);
@@ -258,10 +260,12 @@ void test1()
test(S("abcde"), 1, 3, "abcde", 5, 1);
test(S("abcde"), 1, 3, "abcdefghij", 0, 3);
test(S("abcde"), 1, 3, "abcdefghij", 1, 1);
+
+ return true;
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 bool test2()
{
test(S("abcde"), 1, 3, "abcdefghij", 5, 1);
test(S("abcde"), 1, 3, "abcdefghij", 9, 1);
@@ -363,10 +367,12 @@ void test2()
test(S("abcde"), 2, 3, "abcdefghij", 9, 2);
test(S("abcde"), 2, 3, "abcdefghij", 10, 2);
test(S("abcde"), 2, 3, "abcdefghijklmnopqrst", 0, 3);
+
+ return true;
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 bool test3()
{
test(S("abcde"), 2, 3, "abcdefghijklmnopqrst", 1, 2);
test(S("abcde"), 2, 3, "abcdefghijklmnopqrst", 10, 2);
@@ -468,10 +474,12 @@ void test3()
test(S("abcde"), 5, 1, "abcdefghijklmnopqrst", 10, -10);
test(S("abcde"), 5, 1, "abcdefghijklmnopqrst", 19, -19);
test(S("abcde"), 5, 1, "abcdefghijklmnopqrst", 20, -20);
+
+ return true;
}
template <class S>
-void test4()
+TEST_CONSTEXPR_CXX20 bool test4()
{
test(S("abcde"), 6, 0, "", 0, 0);
test(S("abcde"), 6, 0, "abcde", 0, 0);
@@ -573,10 +581,12 @@ void test4()
test(S("abcdefghij"), 0, 11, "abcde", 0, 10);
test(S("abcdefghij"), 0, 11, "abcde", 1, 9);
test(S("abcdefghij"), 0, 11, "abcde", 2, 8);
+
+ return true;
}
template <class S>
-void test5()
+TEST_CONSTEXPR_CXX20 bool test5()
{
test(S("abcdefghij"), 0, 11, "abcde", 4, 6);
test(S("abcdefghij"), 0, 11, "abcde", 5, 5);
@@ -678,10 +688,12 @@ void test5()
test(S("abcdefghij"), 1, 10, "abcde", 5, 1);
test(S("abcdefghij"), 1, 10, "abcdefghij", 0, 9);
test(S("abcdefghij"), 1, 10, "abcdefghij", 1, 1);
+
+ return true;
}
template <class S>
-void test6()
+TEST_CONSTEXPR_CXX20 bool test6()
{
test(S("abcdefghij"), 1, 10, "abcdefghij", 5, 1);
test(S("abcdefghij"), 1, 10, "abcdefghij", 9, 1);
@@ -783,10 +795,12 @@ void test6()
test(S("abcdefghij"), 5, 6, "abcdefghij", 9, 5);
test(S("abcdefghij"), 5, 6, "abcdefghij", 10, 5);
test(S("abcdefghij"), 5, 6, "abcdefghijklmnopqrst", 0, 5);
+
+ return true;
}
template <class S>
-void test7()
+TEST_CONSTEXPR_CXX20 bool test7()
{
test(S("abcdefghij"), 5, 6, "abcdefghijklmnopqrst", 1, 5);
test(S("abcdefghij"), 5, 6, "abcdefghijklmnopqrst", 10, 5);
@@ -888,10 +902,12 @@ void test7()
test(S("abcdefghij"), 11, 0, "abcdefghijklmnopqrst", 10, 0);
test(S("abcdefghij"), 11, 0, "abcdefghijklmnopqrst", 19, 0);
test(S("abcdefghij"), 11, 0, "abcdefghijklmnopqrst", 20, 0);
+
+ return true;
}
template <class S>
-void test8()
+TEST_CONSTEXPR_CXX20 bool test8()
{
test(S("abcdefghijklmnopqrst"), 0, 0, "", 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 0, "abcde", 0, 0);
@@ -993,10 +1009,12 @@ void test8()
test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", 0, 0);
test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", 1, -1);
test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", 2, -2);
+
+ return true;
}
template <class S>
-void test9()
+TEST_CONSTEXPR_CXX20 bool test9()
{
test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", 4, -4);
test(S("abcdefghijklmnopqrst"), 1, 0, "abcde", 5, -5);
@@ -1098,10 +1116,12 @@ void test9()
test(S("abcdefghijklmnopqrst"), 10, 0, "abcde", 5, -5);
test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghij", 0, 0);
test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghij", 1, -1);
+
+ return true;
}
template <class S>
-void test10()
+TEST_CONSTEXPR_CXX20 bool test10()
{
test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghij", 5, -5);
test(S("abcdefghijklmnopqrst"), 10, 0, "abcdefghij", 9, -9);
@@ -1203,10 +1223,12 @@ void test10()
test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghij", 9, -9);
test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghij", 10, -10);
test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghijklmnopqrst", 0, 0);
+
+ return true;
}
template <class S>
-void test11()
+TEST_CONSTEXPR_CXX20 bool test11()
{
test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghijklmnopqrst", 1, -1);
test(S("abcdefghijklmnopqrst"), 19, 0, "abcdefghijklmnopqrst", 10, -10);
@@ -1292,41 +1314,44 @@ void test11()
test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghijklmnopqrst", 10, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghijklmnopqrst", 19, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, "abcdefghijklmnopqrst", 20, 0);
+
+ return true;
+}
+
+template <class S>
+void test() {
+ test0<S>();
+ test1<S>();
+ test2<S>();
+ test3<S>();
+ test4<S>();
+ test5<S>();
+ test6<S>();
+ test7<S>();
+ test8<S>();
+ test9<S>();
+ test10<S>();
+ test11<S>();
+
+ // static_assert(test0<S>());
+ // static_assert(test1<S>());
+ // static_assert(test2<S>());
+ // static_assert(test3<S>());
+ // static_assert(test4<S>());
+ // static_assert(test5<S>());
+ // static_assert(test6<S>());
+ // static_assert(test7<S>());
+ // static_assert(test8<S>());
+ // static_assert(test9<S>());
+ // static_assert(test10<S>());
+ // static_assert(test11<S>());
}
int main(int, char**)
{
- {
- typedef std::string S;
- test0<S>();
- test1<S>();
- test2<S>();
- test3<S>();
- test4<S>();
- test5<S>();
- test6<S>();
- test7<S>();
- test8<S>();
- test9<S>();
- test10<S>();
- test11<S>();
- }
+ test<std::string>();
#if TEST_STD_VER >= 11
- {
- typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
- test0<S>();
- test1<S>();
- test2<S>();
- test3<S>();
- test4<S>();
- test5<S>();
- test6<S>();
- test7<S>();
- test8<S>();
- test9<S>();
- test10<S>();
- test11<S>();
- }
+ test<std::basic_string<char, std::char_traits<char>, min_allocator<char>>>();
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string.pass.cpp
index 55fd823718ee..e4c2dcd7b0be 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string.pass.cpp
@@ -360,28 +360,37 @@ void test2()
test(S("abcdefghijklmnopqrst"), 21, 0, S("abcdefghijklmnopqrst"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.compare(0, 1, {"abc", 1}) < 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_size_size.pass.cpp
index 05ba6ac82c55..7a5cd6a11ad2 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_size_size.pass.cpp
@@ -20,7 +20,7 @@
#include "test_macros.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -30,7 +30,7 @@ int sign(int x)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::size_type pos1, typename S::size_type n1,
const S& str, typename S::size_type pos2, typename S::size_type n2, int x)
{
@@ -76,7 +76,7 @@ test_npos(const S& s, typename S::size_type pos1, typename S::size_type n1,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 bool test0()
{
test(S(""), 0, 0, S(""), 0, 0, 0);
test(S(""), 0, 0, S(""), 0, 1, 0);
@@ -178,10 +178,12 @@ void test0()
test(S(""), 0, 1, S("abcde"), 5, 0, 0);
test(S(""), 0, 1, S("abcde"), 5, 1, 0);
test(S(""), 0, 1, S("abcde"), 6, 0, 0);
+
+ return true;
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 bool test1()
{
test(S(""), 0, 1, S("abcdefghij"), 0, 0, 0);
test(S(""), 0, 1, S("abcdefghij"), 0, 1, -1);
@@ -283,10 +285,12 @@ void test1()
test(S(""), 1, 0, S("abcdefghij"), 11, 0, 0);
test(S(""), 1, 0, S("abcdefghijklmnopqrst"), 0, 0, 0);
test(S(""), 1, 0, S("abcdefghijklmnopqrst"), 0, 1, 0);
+
+ return true;
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 bool test2()
{
test(S(""), 1, 0, S("abcdefghijklmnopqrst"), 0, 10, 0);
test(S(""), 1, 0, S("abcdefghijklmnopqrst"), 0, 19, 0);
@@ -388,10 +392,12 @@ void test2()
test(S("abcde"), 0, 1, S(""), 0, 1, 1);
test(S("abcde"), 0, 1, S(""), 1, 0, 0);
test(S("abcde"), 0, 1, S("abcde"), 0, 0, 1);
+
+ return true;
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 bool test3()
{
test(S("abcde"), 0, 1, S("abcde"), 0, 1, 0);
test(S("abcde"), 0, 1, S("abcde"), 0, 2, -1);
@@ -493,10 +499,12 @@ void test3()
test(S("abcde"), 0, 2, S("abcdefghij"), 0, 1, 1);
test(S("abcde"), 0, 2, S("abcdefghij"), 0, 5, -3);
test(S("abcde"), 0, 2, S("abcdefghij"), 0, 9, -7);
+
+ return true;
}
template <class S>
-void test4()
+TEST_CONSTEXPR_CXX20 bool test4()
{
test(S("abcde"), 0, 2, S("abcdefghij"), 0, 10, -8);
test(S("abcde"), 0, 2, S("abcdefghij"), 0, 11, -8);
@@ -598,10 +606,12 @@ void test4()
test(S("abcde"), 0, 4, S("abcdefghijklmnopqrst"), 0, 19, -15);
test(S("abcde"), 0, 4, S("abcdefghijklmnopqrst"), 0, 20, -16);
test(S("abcde"), 0, 4, S("abcdefghijklmnopqrst"), 0, 21, -16);
+
+ return true;
}
template <class S>
-void test5()
+TEST_CONSTEXPR_CXX20 bool test5()
{
test(S("abcde"), 0, 4, S("abcdefghijklmnopqrst"), 1, 0, 4);
test(S("abcde"), 0, 4, S("abcdefghijklmnopqrst"), 1, 1, -1);
@@ -703,10 +713,12 @@ void test5()
test(S("abcde"), 0, 6, S("abcde"), 0, 2, 3);
test(S("abcde"), 0, 6, S("abcde"), 0, 4, 1);
test(S("abcde"), 0, 6, S("abcde"), 0, 5, 0);
+
+ return true;
}
template <class S>
-void test6()
+TEST_CONSTEXPR_CXX20 bool test6()
{
test(S("abcde"), 0, 6, S("abcde"), 0, 6, 0);
test(S("abcde"), 0, 6, S("abcde"), 1, 0, 5);
@@ -808,10 +820,12 @@ void test6()
test(S("abcde"), 1, 0, S("abcdefghij"), 0, 11, -10);
test(S("abcde"), 1, 0, S("abcdefghij"), 1, 0, 0);
test(S("abcde"), 1, 0, S("abcdefghij"), 1, 1, -1);
+
+ return true;
}
template <class S>
-void test7()
+TEST_CONSTEXPR_CXX20 bool test7()
{
test(S("abcde"), 1, 0, S("abcdefghij"), 1, 4, -4);
test(S("abcde"), 1, 0, S("abcdefghij"), 1, 8, -8);
@@ -913,10 +927,12 @@ void test7()
test(S("abcde"), 1, 1, S("abcdefghijklmnopqrst"), 1, 1, 0);
test(S("abcde"), 1, 1, S("abcdefghijklmnopqrst"), 1, 9, -8);
test(S("abcde"), 1, 1, S("abcdefghijklmnopqrst"), 1, 18, -17);
+
+ return true;
}
template <class S>
-void test8()
+TEST_CONSTEXPR_CXX20 bool test8()
{
test(S("abcde"), 1, 1, S("abcdefghijklmnopqrst"), 1, 19, -18);
test(S("abcde"), 1, 1, S("abcdefghijklmnopqrst"), 1, 20, -18);
@@ -1018,10 +1034,12 @@ void test8()
test(S("abcde"), 1, 3, S("abcde"), 1, 0, 3);
test(S("abcde"), 1, 3, S("abcde"), 1, 1, 2);
test(S("abcde"), 1, 3, S("abcde"), 1, 2, 1);
+
+ return true;
}
template <class S>
-void test9()
+TEST_CONSTEXPR_CXX20 bool test9()
{
test(S("abcde"), 1, 3, S("abcde"), 1, 3, 0);
test(S("abcde"), 1, 3, S("abcde"), 1, 4, -1);
@@ -1123,10 +1141,12 @@ void test9()
test(S("abcde"), 1, 4, S("abcdefghij"), 1, 8, -4);
test(S("abcde"), 1, 4, S("abcdefghij"), 1, 9, -5);
test(S("abcde"), 1, 4, S("abcdefghij"), 1, 10, -5);
+
+ return true;
}
template <class S>
-void test10()
+TEST_CONSTEXPR_CXX20 bool test10()
{
test(S("abcde"), 1, 4, S("abcdefghij"), 5, 0, 4);
test(S("abcde"), 1, 4, S("abcdefghij"), 5, 1, -4);
@@ -1228,10 +1248,12 @@ void test10()
test(S("abcde"), 1, 5, S("abcdefghijklmnopqrst"), 1, 20, -15);
test(S("abcde"), 1, 5, S("abcdefghijklmnopqrst"), 10, 0, 4);
test(S("abcde"), 1, 5, S("abcdefghijklmnopqrst"), 10, 1, -9);
+
+ return true;
}
template <class S>
-void test11()
+TEST_CONSTEXPR_CXX20 bool test11()
{
test(S("abcde"), 1, 5, S("abcdefghijklmnopqrst"), 10, 5, -9);
test(S("abcde"), 1, 5, S("abcdefghijklmnopqrst"), 10, 9, -9);
@@ -1333,10 +1355,12 @@ void test11()
test(S("abcde"), 2, 1, S("abcde"), 1, 4, 1);
test(S("abcde"), 2, 1, S("abcde"), 1, 5, 1);
test(S("abcde"), 2, 1, S("abcde"), 2, 0, 1);
+
+ return true;
}
template <class S>
-void test12()
+TEST_CONSTEXPR_CXX20 bool test12()
{
test(S("abcde"), 2, 1, S("abcde"), 2, 1, 0);
test(S("abcde"), 2, 1, S("abcde"), 2, 2, -1);
@@ -1438,10 +1462,12 @@ void test12()
test(S("abcde"), 2, 2, S("abcdefghij"), 5, 1, -3);
test(S("abcde"), 2, 2, S("abcdefghij"), 5, 2, -3);
test(S("abcde"), 2, 2, S("abcdefghij"), 5, 4, -3);
+
+ return true;
}
template <class S>
-void test13()
+TEST_CONSTEXPR_CXX20 bool test13()
{
test(S("abcde"), 2, 2, S("abcdefghij"), 5, 5, -3);
test(S("abcde"), 2, 2, S("abcdefghij"), 5, 6, -3);
@@ -1543,10 +1569,12 @@ void test13()
test(S("abcde"), 2, 3, S("abcdefghijklmnopqrst"), 10, 9, -8);
test(S("abcde"), 2, 3, S("abcdefghijklmnopqrst"), 10, 10, -8);
test(S("abcde"), 2, 3, S("abcdefghijklmnopqrst"), 10, 11, -8);
+
+ return true;
}
template <class S>
-void test14()
+TEST_CONSTEXPR_CXX20 bool test14()
{
test(S("abcde"), 2, 3, S("abcdefghijklmnopqrst"), 19, 0, 3);
test(S("abcde"), 2, 3, S("abcdefghijklmnopqrst"), 19, 1, -17);
@@ -1648,10 +1676,12 @@ void test14()
test(S("abcde"), 4, 0, S("abcde"), 2, 2, -2);
test(S("abcde"), 4, 0, S("abcde"), 2, 3, -3);
test(S("abcde"), 4, 0, S("abcde"), 2, 4, -3);
+
+ return true;
}
template <class S>
-void test15()
+TEST_CONSTEXPR_CXX20 bool test15()
{
test(S("abcde"), 4, 0, S("abcde"), 4, 0, 0);
test(S("abcde"), 4, 0, S("abcde"), 4, 1, -1);
@@ -1753,10 +1783,12 @@ void test15()
test(S("abcde"), 4, 1, S("abcdefghij"), 5, 6, -1);
test(S("abcde"), 4, 1, S("abcdefghij"), 9, 0, 1);
test(S("abcde"), 4, 1, S("abcdefghij"), 9, 1, -5);
+
+ return true;
}
template <class S>
-void test16()
+TEST_CONSTEXPR_CXX20 bool test16()
{
test(S("abcde"), 4, 1, S("abcdefghij"), 9, 2, -5);
test(S("abcde"), 4, 1, S("abcdefghij"), 10, 0, 1);
@@ -1858,10 +1890,12 @@ void test16()
test(S("abcde"), 4, 2, S("abcdefghijklmnopqrst"), 19, 1, -15);
test(S("abcde"), 4, 2, S("abcdefghijklmnopqrst"), 19, 2, -15);
test(S("abcde"), 4, 2, S("abcdefghijklmnopqrst"), 20, 0, 1);
+
+ return true;
}
template <class S>
-void test17()
+TEST_CONSTEXPR_CXX20 bool test17()
{
test(S("abcde"), 4, 2, S("abcdefghijklmnopqrst"), 20, 1, 1);
test(S("abcde"), 4, 2, S("abcdefghijklmnopqrst"), 21, 0, 0);
@@ -1963,10 +1997,12 @@ void test17()
test(S("abcde"), 5, 1, S("abcde"), 4, 1, -1);
test(S("abcde"), 5, 1, S("abcde"), 4, 2, -1);
test(S("abcde"), 5, 1, S("abcde"), 5, 0, 0);
+
+ return true;
}
template <class S>
-void test18()
+TEST_CONSTEXPR_CXX20 bool test18()
{
test(S("abcde"), 5, 1, S("abcde"), 5, 1, 0);
test(S("abcde"), 5, 1, S("abcde"), 6, 0, 0);
@@ -2068,10 +2104,12 @@ void test18()
test(S("abcde"), 6, 0, S("abcdefghij"), 10, 0, 0);
test(S("abcde"), 6, 0, S("abcdefghij"), 10, 1, 0);
test(S("abcde"), 6, 0, S("abcdefghij"), 11, 0, 0);
+
+ return true;
}
template <class S>
-void test19()
+TEST_CONSTEXPR_CXX20 bool test19()
{
test(S("abcde"), 6, 0, S("abcdefghijklmnopqrst"), 0, 0, 0);
test(S("abcde"), 6, 0, S("abcdefghijklmnopqrst"), 0, 1, 0);
@@ -2173,10 +2211,12 @@ void test19()
test(S("abcdefghij"), 0, 0, S("abcdefghijklmnopqrst"), 21, 0, 0);
test(S("abcdefghij"), 0, 1, S(""), 0, 0, 1);
test(S("abcdefghij"), 0, 1, S(""), 0, 1, 1);
+
+ return true;
}
template <class S>
-void test20()
+TEST_CONSTEXPR_CXX20 bool test20()
{
test(S("abcdefghij"), 0, 1, S(""), 1, 0, 0);
test(S("abcdefghij"), 0, 1, S("abcde"), 0, 0, 1);
@@ -2278,10 +2318,12 @@ void test20()
test(S("abcdefghij"), 0, 5, S("abcde"), 6, 0, 0);
test(S("abcdefghij"), 0, 5, S("abcdefghij"), 0, 0, 5);
test(S("abcdefghij"), 0, 5, S("abcdefghij"), 0, 1, 4);
+
+ return true;
}
template <class S>
-void test21()
+TEST_CONSTEXPR_CXX20 bool test21()
{
test(S("abcdefghij"), 0, 5, S("abcdefghij"), 0, 5, 0);
test(S("abcdefghij"), 0, 5, S("abcdefghij"), 0, 9, -4);
@@ -2383,10 +2425,12 @@ void test21()
test(S("abcdefghij"), 0, 9, S("abcdefghijklmnopqrst"), 0, 1, 8);
test(S("abcdefghij"), 0, 9, S("abcdefghijklmnopqrst"), 0, 10, -1);
test(S("abcdefghij"), 0, 9, S("abcdefghijklmnopqrst"), 0, 19, -10);
+
+ return true;
}
template <class S>
-void test22()
+TEST_CONSTEXPR_CXX20 bool test22()
{
test(S("abcdefghij"), 0, 9, S("abcdefghijklmnopqrst"), 0, 20, -11);
test(S("abcdefghij"), 0, 9, S("abcdefghijklmnopqrst"), 0, 21, -11);
@@ -2488,10 +2532,12 @@ void test22()
test(S("abcdefghij"), 0, 11, S("abcde"), 0, 0, 10);
test(S("abcdefghij"), 0, 11, S("abcde"), 0, 1, 9);
test(S("abcdefghij"), 0, 11, S("abcde"), 0, 2, 8);
+
+ return true;
}
template <class S>
-void test23()
+TEST_CONSTEXPR_CXX20 bool test23()
{
test(S("abcdefghij"), 0, 11, S("abcde"), 0, 4, 6);
test(S("abcdefghij"), 0, 11, S("abcde"), 0, 5, 5);
@@ -2593,10 +2639,12 @@ void test23()
test(S("abcdefghij"), 1, 0, S("abcdefghij"), 0, 9, -9);
test(S("abcdefghij"), 1, 0, S("abcdefghij"), 0, 10, -10);
test(S("abcdefghij"), 1, 0, S("abcdefghij"), 0, 11, -10);
+
+ return true;
}
template <class S>
-void test24()
+TEST_CONSTEXPR_CXX20 bool test24()
{
test(S("abcdefghij"), 1, 0, S("abcdefghij"), 1, 0, 0);
test(S("abcdefghij"), 1, 0, S("abcdefghij"), 1, 1, -1);
@@ -2698,10 +2746,12 @@ void test24()
test(S("abcdefghij"), 1, 1, S("abcdefghijklmnopqrst"), 0, 21, 1);
test(S("abcdefghij"), 1, 1, S("abcdefghijklmnopqrst"), 1, 0, 1);
test(S("abcdefghij"), 1, 1, S("abcdefghijklmnopqrst"), 1, 1, 0);
+
+ return true;
}
template <class S>
-void test25()
+TEST_CONSTEXPR_CXX20 bool test25()
{
test(S("abcdefghij"), 1, 1, S("abcdefghijklmnopqrst"), 1, 9, -8);
test(S("abcdefghij"), 1, 1, S("abcdefghijklmnopqrst"), 1, 18, -17);
@@ -2803,10 +2853,12 @@ void test25()
test(S("abcdefghij"), 1, 8, S("abcde"), 0, 5, 1);
test(S("abcdefghij"), 1, 8, S("abcde"), 0, 6, 1);
test(S("abcdefghij"), 1, 8, S("abcde"), 1, 0, 8);
+
+ return true;
}
template <class S>
-void test26()
+TEST_CONSTEXPR_CXX20 bool test26()
{
test(S("abcdefghij"), 1, 8, S("abcde"), 1, 1, 7);
test(S("abcdefghij"), 1, 8, S("abcde"), 1, 2, 6);
@@ -2908,10 +2960,12 @@ void test26()
test(S("abcdefghij"), 1, 9, S("abcdefghij"), 1, 1, 8);
test(S("abcdefghij"), 1, 9, S("abcdefghij"), 1, 4, 5);
test(S("abcdefghij"), 1, 9, S("abcdefghij"), 1, 8, 1);
+
+ return true;
}
template <class S>
-void test27()
+TEST_CONSTEXPR_CXX20 bool test27()
{
test(S("abcdefghij"), 1, 9, S("abcdefghij"), 1, 9, 0);
test(S("abcdefghij"), 1, 9, S("abcdefghij"), 1, 10, 0);
@@ -3013,10 +3067,12 @@ void test27()
test(S("abcdefghij"), 1, 10, S("abcdefghijklmnopqrst"), 1, 18, -9);
test(S("abcdefghij"), 1, 10, S("abcdefghijklmnopqrst"), 1, 19, -10);
test(S("abcdefghij"), 1, 10, S("abcdefghijklmnopqrst"), 1, 20, -10);
+
+ return true;
}
template <class S>
-void test28()
+TEST_CONSTEXPR_CXX20 bool test28()
{
test(S("abcdefghij"), 1, 10, S("abcdefghijklmnopqrst"), 10, 0, 9);
test(S("abcdefghij"), 1, 10, S("abcdefghijklmnopqrst"), 10, 1, -9);
@@ -3118,10 +3174,12 @@ void test28()
test(S("abcdefghij"), 5, 1, S("abcde"), 1, 2, 4);
test(S("abcdefghij"), 5, 1, S("abcde"), 1, 3, 4);
test(S("abcdefghij"), 5, 1, S("abcde"), 1, 4, 4);
+
+ return true;
}
template <class S>
-void test29()
+TEST_CONSTEXPR_CXX20 bool test29()
{
test(S("abcdefghij"), 5, 1, S("abcde"), 1, 5, 4);
test(S("abcdefghij"), 5, 1, S("abcde"), 2, 0, 1);
@@ -3223,10 +3281,12 @@ void test29()
test(S("abcdefghij"), 5, 2, S("abcdefghij"), 1, 10, 4);
test(S("abcdefghij"), 5, 2, S("abcdefghij"), 5, 0, 2);
test(S("abcdefghij"), 5, 2, S("abcdefghij"), 5, 1, 1);
+
+ return true;
}
template <class S>
-void test30()
+TEST_CONSTEXPR_CXX20 bool test30()
{
test(S("abcdefghij"), 5, 2, S("abcdefghij"), 5, 2, 0);
test(S("abcdefghij"), 5, 2, S("abcdefghij"), 5, 4, -2);
@@ -3328,10 +3388,12 @@ void test30()
test(S("abcdefghij"), 5, 4, S("abcdefghijklmnopqrst"), 10, 1, -5);
test(S("abcdefghij"), 5, 4, S("abcdefghijklmnopqrst"), 10, 5, -5);
test(S("abcdefghij"), 5, 4, S("abcdefghijklmnopqrst"), 10, 9, -5);
+
+ return true;
}
template <class S>
-void test31()
+TEST_CONSTEXPR_CXX20 bool test31()
{
test(S("abcdefghij"), 5, 4, S("abcdefghijklmnopqrst"), 10, 10, -5);
test(S("abcdefghij"), 5, 4, S("abcdefghijklmnopqrst"), 10, 11, -5);
@@ -3433,10 +3495,12 @@ void test31()
test(S("abcdefghij"), 5, 6, S("abcde"), 2, 0, 5);
test(S("abcdefghij"), 5, 6, S("abcde"), 2, 1, 3);
test(S("abcdefghij"), 5, 6, S("abcde"), 2, 2, 3);
+
+ return true;
}
template <class S>
-void test32()
+TEST_CONSTEXPR_CXX20 bool test32()
{
test(S("abcdefghij"), 5, 6, S("abcde"), 2, 3, 3);
test(S("abcdefghij"), 5, 6, S("abcde"), 2, 4, 3);
@@ -3538,10 +3602,12 @@ void test32()
test(S("abcdefghij"), 9, 0, S("abcdefghij"), 5, 4, -4);
test(S("abcdefghij"), 9, 0, S("abcdefghij"), 5, 5, -5);
test(S("abcdefghij"), 9, 0, S("abcdefghij"), 5, 6, -5);
+
+ return true;
}
template <class S>
-void test33()
+TEST_CONSTEXPR_CXX20 bool test33()
{
test(S("abcdefghij"), 9, 0, S("abcdefghij"), 9, 0, 0);
test(S("abcdefghij"), 9, 0, S("abcdefghij"), 9, 1, -1);
@@ -3643,10 +3709,12 @@ void test33()
test(S("abcdefghij"), 9, 1, S("abcdefghijklmnopqrst"), 10, 11, -1);
test(S("abcdefghij"), 9, 1, S("abcdefghijklmnopqrst"), 19, 0, 1);
test(S("abcdefghij"), 9, 1, S("abcdefghijklmnopqrst"), 19, 1, -10);
+
+ return true;
}
template <class S>
-void test34()
+TEST_CONSTEXPR_CXX20 bool test34()
{
test(S("abcdefghij"), 9, 1, S("abcdefghijklmnopqrst"), 19, 2, -10);
test(S("abcdefghij"), 9, 1, S("abcdefghijklmnopqrst"), 20, 0, 1);
@@ -3748,10 +3816,12 @@ void test34()
test(S("abcdefghij"), 10, 0, S("abcde"), 2, 4, -3);
test(S("abcdefghij"), 10, 0, S("abcde"), 4, 0, 0);
test(S("abcdefghij"), 10, 0, S("abcde"), 4, 1, -1);
+
+ return true;
}
template <class S>
-void test35()
+TEST_CONSTEXPR_CXX20 bool test35()
{
test(S("abcdefghij"), 10, 0, S("abcde"), 4, 2, -1);
test(S("abcdefghij"), 10, 0, S("abcde"), 5, 0, 0);
@@ -3853,10 +3923,12 @@ void test35()
test(S("abcdefghij"), 10, 1, S("abcdefghij"), 9, 1, -1);
test(S("abcdefghij"), 10, 1, S("abcdefghij"), 9, 2, -1);
test(S("abcdefghij"), 10, 1, S("abcdefghij"), 10, 0, 0);
+
+ return true;
}
template <class S>
-void test36()
+TEST_CONSTEXPR_CXX20 bool test36()
{
test(S("abcdefghij"), 10, 1, S("abcdefghij"), 10, 1, 0);
test(S("abcdefghij"), 10, 1, S("abcdefghij"), 11, 0, 0);
@@ -3958,10 +4030,12 @@ void test36()
test(S("abcdefghij"), 11, 0, S("abcdefghijklmnopqrst"), 20, 0, 0);
test(S("abcdefghij"), 11, 0, S("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghij"), 11, 0, S("abcdefghijklmnopqrst"), 21, 0, 0);
+
+ return true;
}
template <class S>
-void test37()
+TEST_CONSTEXPR_CXX20 bool test37()
{
test(S("abcdefghijklmnopqrst"), 0, 0, S(""), 0, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 0, S(""), 0, 1, 0);
@@ -4063,10 +4137,12 @@ void test37()
test(S("abcdefghijklmnopqrst"), 0, 1, S("abcde"), 5, 0, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, S("abcde"), 5, 1, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, S("abcde"), 6, 0, 0);
+
+ return true;
}
template <class S>
-void test38()
+TEST_CONSTEXPR_CXX20 bool test38()
{
test(S("abcdefghijklmnopqrst"), 0, 1, S("abcdefghij"), 0, 0, 1);
test(S("abcdefghijklmnopqrst"), 0, 1, S("abcdefghij"), 0, 1, 0);
@@ -4168,10 +4244,12 @@ void test38()
test(S("abcdefghijklmnopqrst"), 0, 10, S("abcdefghij"), 11, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 10, S("abcdefghijklmnopqrst"), 0, 0, 10);
test(S("abcdefghijklmnopqrst"), 0, 10, S("abcdefghijklmnopqrst"), 0, 1, 9);
+
+ return true;
}
template <class S>
-void test39()
+TEST_CONSTEXPR_CXX20 bool test39()
{
test(S("abcdefghijklmnopqrst"), 0, 10, S("abcdefghijklmnopqrst"), 0, 10, 0);
test(S("abcdefghijklmnopqrst"), 0, 10, S("abcdefghijklmnopqrst"), 0, 19, -9);
@@ -4273,10 +4351,12 @@ void test39()
test(S("abcdefghijklmnopqrst"), 0, 20, S(""), 0, 1, 20);
test(S("abcdefghijklmnopqrst"), 0, 20, S(""), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 0, 20, S("abcde"), 0, 0, 20);
+
+ return true;
}
template <class S>
-void test40()
+TEST_CONSTEXPR_CXX20 bool test40()
{
test(S("abcdefghijklmnopqrst"), 0, 20, S("abcde"), 0, 1, 19);
test(S("abcdefghijklmnopqrst"), 0, 20, S("abcde"), 0, 2, 18);
@@ -4378,10 +4458,12 @@ void test40()
test(S("abcdefghijklmnopqrst"), 0, 21, S("abcdefghij"), 0, 1, 19);
test(S("abcdefghijklmnopqrst"), 0, 21, S("abcdefghij"), 0, 5, 15);
test(S("abcdefghijklmnopqrst"), 0, 21, S("abcdefghij"), 0, 9, 11);
+
+ return true;
}
template <class S>
-void test41()
+TEST_CONSTEXPR_CXX20 bool test41()
{
test(S("abcdefghijklmnopqrst"), 0, 21, S("abcdefghij"), 0, 10, 10);
test(S("abcdefghijklmnopqrst"), 0, 21, S("abcdefghij"), 0, 11, 10);
@@ -4483,10 +4565,12 @@ void test41()
test(S("abcdefghijklmnopqrst"), 1, 0, S("abcdefghijklmnopqrst"), 0, 19, -19);
test(S("abcdefghijklmnopqrst"), 1, 0, S("abcdefghijklmnopqrst"), 0, 20, -20);
test(S("abcdefghijklmnopqrst"), 1, 0, S("abcdefghijklmnopqrst"), 0, 21, -20);
+
+ return true;
}
template <class S>
-void test42()
+TEST_CONSTEXPR_CXX20 bool test42()
{
test(S("abcdefghijklmnopqrst"), 1, 0, S("abcdefghijklmnopqrst"), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 1, 0, S("abcdefghijklmnopqrst"), 1, 1, -1);
@@ -4588,10 +4672,12 @@ void test42()
test(S("abcdefghijklmnopqrst"), 1, 9, S("abcde"), 0, 2, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, S("abcde"), 0, 4, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, S("abcde"), 0, 5, 1);
+
+ return true;
}
template <class S>
-void test43()
+TEST_CONSTEXPR_CXX20 bool test43()
{
test(S("abcdefghijklmnopqrst"), 1, 9, S("abcde"), 0, 6, 1);
test(S("abcdefghijklmnopqrst"), 1, 9, S("abcde"), 1, 0, 9);
@@ -4693,10 +4779,12 @@ void test43()
test(S("abcdefghijklmnopqrst"), 1, 18, S("abcdefghij"), 0, 11, 1);
test(S("abcdefghijklmnopqrst"), 1, 18, S("abcdefghij"), 1, 0, 18);
test(S("abcdefghijklmnopqrst"), 1, 18, S("abcdefghij"), 1, 1, 17);
+
+ return true;
}
template <class S>
-void test44()
+TEST_CONSTEXPR_CXX20 bool test44()
{
test(S("abcdefghijklmnopqrst"), 1, 18, S("abcdefghij"), 1, 4, 14);
test(S("abcdefghijklmnopqrst"), 1, 18, S("abcdefghij"), 1, 8, 10);
@@ -4798,10 +4886,12 @@ void test44()
test(S("abcdefghijklmnopqrst"), 1, 19, S("abcdefghijklmnopqrst"), 1, 1, 18);
test(S("abcdefghijklmnopqrst"), 1, 19, S("abcdefghijklmnopqrst"), 1, 9, 10);
test(S("abcdefghijklmnopqrst"), 1, 19, S("abcdefghijklmnopqrst"), 1, 18, 1);
+
+ return true;
}
template <class S>
-void test45()
+TEST_CONSTEXPR_CXX20 bool test45()
{
test(S("abcdefghijklmnopqrst"), 1, 19, S("abcdefghijklmnopqrst"), 1, 19, 0);
test(S("abcdefghijklmnopqrst"), 1, 19, S("abcdefghijklmnopqrst"), 1, 20, 0);
@@ -4903,10 +4993,12 @@ void test45()
test(S("abcdefghijklmnopqrst"), 10, 0, S("abcde"), 1, 0, 0);
test(S("abcdefghijklmnopqrst"), 10, 0, S("abcde"), 1, 1, -1);
test(S("abcdefghijklmnopqrst"), 10, 0, S("abcde"), 1, 2, -2);
+
+ return true;
}
template <class S>
-void test46()
+TEST_CONSTEXPR_CXX20 bool test46()
{
test(S("abcdefghijklmnopqrst"), 10, 0, S("abcde"), 1, 3, -3);
test(S("abcdefghijklmnopqrst"), 10, 0, S("abcde"), 1, 4, -4);
@@ -5008,10 +5100,12 @@ void test46()
test(S("abcdefghijklmnopqrst"), 10, 1, S("abcdefghij"), 1, 8, 9);
test(S("abcdefghijklmnopqrst"), 10, 1, S("abcdefghij"), 1, 9, 9);
test(S("abcdefghijklmnopqrst"), 10, 1, S("abcdefghij"), 1, 10, 9);
+
+ return true;
}
template <class S>
-void test47()
+TEST_CONSTEXPR_CXX20 bool test47()
{
test(S("abcdefghijklmnopqrst"), 10, 1, S("abcdefghij"), 5, 0, 1);
test(S("abcdefghijklmnopqrst"), 10, 1, S("abcdefghij"), 5, 1, 5);
@@ -5113,10 +5207,12 @@ void test47()
test(S("abcdefghijklmnopqrst"), 10, 5, S("abcdefghijklmnopqrst"), 1, 20, 9);
test(S("abcdefghijklmnopqrst"), 10, 5, S("abcdefghijklmnopqrst"), 10, 0, 5);
test(S("abcdefghijklmnopqrst"), 10, 5, S("abcdefghijklmnopqrst"), 10, 1, 4);
+
+ return true;
}
template <class S>
-void test48()
+TEST_CONSTEXPR_CXX20 bool test48()
{
test(S("abcdefghijklmnopqrst"), 10, 5, S("abcdefghijklmnopqrst"), 10, 5, 0);
test(S("abcdefghijklmnopqrst"), 10, 5, S("abcdefghijklmnopqrst"), 10, 9, -4);
@@ -5218,10 +5314,12 @@ void test48()
test(S("abcdefghijklmnopqrst"), 10, 10, S("abcde"), 1, 4, 9);
test(S("abcdefghijklmnopqrst"), 10, 10, S("abcde"), 1, 5, 9);
test(S("abcdefghijklmnopqrst"), 10, 10, S("abcde"), 2, 0, 10);
+
+ return true;
}
template <class S>
-void test49()
+TEST_CONSTEXPR_CXX20 bool test49()
{
test(S("abcdefghijklmnopqrst"), 10, 10, S("abcde"), 2, 1, 8);
test(S("abcdefghijklmnopqrst"), 10, 10, S("abcde"), 2, 2, 8);
@@ -5323,10 +5421,12 @@ void test49()
test(S("abcdefghijklmnopqrst"), 10, 11, S("abcdefghij"), 5, 1, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, S("abcdefghij"), 5, 2, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, S("abcdefghij"), 5, 4, 5);
+
+ return true;
}
template <class S>
-void test50()
+TEST_CONSTEXPR_CXX20 bool test50()
{
test(S("abcdefghijklmnopqrst"), 10, 11, S("abcdefghij"), 5, 5, 5);
test(S("abcdefghijklmnopqrst"), 10, 11, S("abcdefghij"), 5, 6, 5);
@@ -5428,10 +5528,12 @@ void test50()
test(S("abcdefghijklmnopqrst"), 19, 0, S("abcdefghijklmnopqrst"), 10, 9, -9);
test(S("abcdefghijklmnopqrst"), 19, 0, S("abcdefghijklmnopqrst"), 10, 10, -10);
test(S("abcdefghijklmnopqrst"), 19, 0, S("abcdefghijklmnopqrst"), 10, 11, -10);
+
+ return true;
}
template <class S>
-void test51()
+TEST_CONSTEXPR_CXX20 bool test51()
{
test(S("abcdefghijklmnopqrst"), 19, 0, S("abcdefghijklmnopqrst"), 19, 0, 0);
test(S("abcdefghijklmnopqrst"), 19, 0, S("abcdefghijklmnopqrst"), 19, 1, -1);
@@ -5533,10 +5635,12 @@ void test51()
test(S("abcdefghijklmnopqrst"), 19, 2, S("abcde"), 2, 2, 17);
test(S("abcdefghijklmnopqrst"), 19, 2, S("abcde"), 2, 3, 17);
test(S("abcdefghijklmnopqrst"), 19, 2, S("abcde"), 2, 4, 17);
+
+ return true;
}
template <class S>
-void test52()
+TEST_CONSTEXPR_CXX20 bool test52()
{
test(S("abcdefghijklmnopqrst"), 19, 2, S("abcde"), 4, 0, 1);
test(S("abcdefghijklmnopqrst"), 19, 2, S("abcde"), 4, 1, 15);
@@ -5638,10 +5742,12 @@ void test52()
test(S("abcdefghijklmnopqrst"), 20, 0, S("abcdefghij"), 5, 6, -5);
test(S("abcdefghijklmnopqrst"), 20, 0, S("abcdefghij"), 9, 0, 0);
test(S("abcdefghijklmnopqrst"), 20, 0, S("abcdefghij"), 9, 1, -1);
+
+ return true;
}
template <class S>
-void test53()
+TEST_CONSTEXPR_CXX20 bool test53()
{
test(S("abcdefghijklmnopqrst"), 20, 0, S("abcdefghij"), 9, 2, -1);
test(S("abcdefghijklmnopqrst"), 20, 0, S("abcdefghij"), 10, 0, 0);
@@ -5743,10 +5849,12 @@ void test53()
test(S("abcdefghijklmnopqrst"), 20, 1, S("abcdefghijklmnopqrst"), 19, 1, -1);
test(S("abcdefghijklmnopqrst"), 20, 1, S("abcdefghijklmnopqrst"), 19, 2, -1);
test(S("abcdefghijklmnopqrst"), 20, 1, S("abcdefghijklmnopqrst"), 20, 0, 0);
+
+ return true;
}
template <class S>
-void test54()
+TEST_CONSTEXPR_CXX20 bool test54()
{
test(S("abcdefghijklmnopqrst"), 20, 1, S("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghijklmnopqrst"), 20, 1, S("abcdefghijklmnopqrst"), 21, 0, 0);
@@ -5824,139 +5932,144 @@ void test54()
test(S("abcdefghijklmnopqrst"), 21, 0, S("abcdefghijklmnopqrst"), 20, 0, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, S("abcdefghijklmnopqrst"), 20, 1, 0);
test(S("abcdefghijklmnopqrst"), 21, 0, S("abcdefghijklmnopqrst"), 21, 0, 0);
+
+ return true;
}
template<class S>
-void test55()
+TEST_CONSTEXPR_CXX20 bool test55()
{
test_npos(S(""), 0, 0, S(""), 0, 0);
test_npos(S(""), 0, 0, S("abcde"), 0, -5);
test_npos(S("abcde"), 0, 0, S("abcdefghij"), 0, -10);
test_npos(S("abcde"), 0, 0, S("abcdefghij"), 1, -9);
test_npos(S("abcde"), 0, 0, S("abcdefghij"), 5, -5);
+
+ return true;
+}
+
+template <class S>
+void test() {
+ test0<S>();
+ test1<S>();
+ test2<S>();
+ test3<S>();
+ test4<S>();
+ test5<S>();
+ test6<S>();
+ test7<S>();
+ test8<S>();
+ test9<S>();
+ test10<S>();
+ test11<S>();
+ test12<S>();
+ test13<S>();
+ test14<S>();
+ test15<S>();
+ test16<S>();
+ test17<S>();
+ test18<S>();
+ test19<S>();
+ test20<S>();
+ test21<S>();
+ test22<S>();
+ test23<S>();
+ test24<S>();
+ test25<S>();
+ test26<S>();
+ test27<S>();
+ test28<S>();
+ test29<S>();
+ test30<S>();
+ test31<S>();
+ test32<S>();
+ test33<S>();
+ test34<S>();
+ test35<S>();
+ test36<S>();
+ test37<S>();
+ test38<S>();
+ test39<S>();
+ test40<S>();
+ test41<S>();
+ test42<S>();
+ test43<S>();
+ test44<S>();
+ test45<S>();
+ test46<S>();
+ test47<S>();
+ test48<S>();
+ test49<S>();
+ test50<S>();
+ test51<S>();
+ test52<S>();
+ test53<S>();
+ test54<S>();
+ test55<S>();
+
+ // static_assert(test0<S>());
+ // static_assert(test1<S>());
+ // static_assert(test2<S>());
+ // static_assert(test3<S>());
+ // static_assert(test4<S>());
+ // static_assert(test5<S>());
+ // static_assert(test6<S>());
+ // static_assert(test7<S>());
+ // static_assert(test8<S>());
+ // static_assert(test9<S>());
+ // static_assert(test10<S>());
+ // static_assert(test11<S>());
+ // static_assert(test12<S>());
+ // static_assert(test13<S>());
+ // static_assert(test14<S>());
+ // static_assert(test15<S>());
+ // static_assert(test16<S>());
+ // static_assert(test17<S>());
+ // static_assert(test18<S>());
+ // static_assert(test19<S>());
+ // static_assert(test20<S>());
+ // static_assert(test21<S>());
+ // static_assert(test22<S>());
+ // static_assert(test23<S>());
+ // static_assert(test24<S>());
+ // static_assert(test25<S>());
+ // static_assert(test26<S>());
+ // static_assert(test27<S>());
+ // static_assert(test28<S>());
+ // static_assert(test29<S>());
+ // static_assert(test30<S>());
+ // static_assert(test31<S>());
+ // static_assert(test32<S>());
+ // static_assert(test33<S>());
+ // static_assert(test34<S>());
+ // static_assert(test35<S>());
+ // static_assert(test36<S>());
+ // static_assert(test37<S>());
+ // static_assert(test38<S>());
+ // static_assert(test39<S>());
+ // static_assert(test40<S>());
+ // static_assert(test41<S>());
+ // static_assert(test42<S>());
+ // static_assert(test43<S>());
+ // static_assert(test44<S>());
+ // static_assert(test45<S>());
+ // static_assert(test46<S>());
+ // static_assert(test47<S>());
+ // static_assert(test48<S>());
+ // static_assert(test49<S>());
+ // static_assert(test50<S>());
+ // static_assert(test51<S>());
+ // static_assert(test52<S>());
+ // static_assert(test53<S>());
+ // static_assert(test54<S>());
+ // static_assert(test55<S>());
}
int main(int, char**)
{
- {
- typedef std::string S;
- test0<S>();
- test1<S>();
- test2<S>();
- test3<S>();
- test4<S>();
- test5<S>();
- test6<S>();
- test7<S>();
- test8<S>();
- test9<S>();
- test10<S>();
- test11<S>();
- test12<S>();
- test13<S>();
- test14<S>();
- test15<S>();
- test16<S>();
- test17<S>();
- test18<S>();
- test19<S>();
- test20<S>();
- test21<S>();
- test22<S>();
- test23<S>();
- test24<S>();
- test25<S>();
- test26<S>();
- test27<S>();
- test28<S>();
- test29<S>();
- test30<S>();
- test31<S>();
- test32<S>();
- test33<S>();
- test34<S>();
- test35<S>();
- test36<S>();
- test37<S>();
- test38<S>();
- test39<S>();
- test40<S>();
- test41<S>();
- test42<S>();
- test43<S>();
- test44<S>();
- test45<S>();
- test46<S>();
- test47<S>();
- test48<S>();
- test49<S>();
- test50<S>();
- test51<S>();
- test52<S>();
- test53<S>();
- test54<S>();
- test55<S>();
- }
+ test<std::string>();
#if TEST_STD_VER >= 11
- {
- typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
- test0<S>();
- test1<S>();
- test2<S>();
- test3<S>();
- test4<S>();
- test5<S>();
- test6<S>();
- test7<S>();
- test8<S>();
- test9<S>();
- test10<S>();
- test11<S>();
- test12<S>();
- test13<S>();
- test14<S>();
- test15<S>();
- test16<S>();
- test17<S>();
- test18<S>();
- test19<S>();
- test20<S>();
- test21<S>();
- test22<S>();
- test23<S>();
- test24<S>();
- test25<S>();
- test26<S>();
- test27<S>();
- test28<S>();
- test29<S>();
- test30<S>();
- test31<S>();
- test32<S>();
- test33<S>();
- test34<S>();
- test35<S>();
- test36<S>();
- test37<S>();
- test38<S>();
- test39<S>();
- test40<S>();
- test41<S>();
- test42<S>();
- test43<S>();
- test44<S>();
- test45<S>();
- test46<S>();
- test47<S>();
- test48<S>();
- test49<S>();
- test50<S>();
- test51<S>();
- test52<S>();
- test53<S>();
- test54<S>();
- test55<S>();
- }
+ test<std::basic_string<char, std::char_traits<char>, min_allocator<char>>>();
#endif
return 0;
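
(The same refactoring recurs in each test file below: the per-signature helpers gain TEST_CONSTEXPR_CXX20, the body of main() moves into a bool-returning test() so the identical code can later feed a static_assert, and the static_assert itself stays commented out until constexpr std::string support is complete. A minimal sketch of the pattern, assuming the usual test_macros.h definitions of TEST_STD_VER and TEST_CONSTEXPR_CXX20; the string values here are illustrative only.)

#include <cassert>
#include <string>

#include "test_macros.h"  // provides TEST_STD_VER and TEST_CONSTEXPR_CXX20
                          // (the latter expands to `constexpr` only for C++20 and later)

TEST_CONSTEXPR_CXX20 bool test() {
  std::string s("abcde");
  assert(s.compare("abcde") == 0);  // exercised at run time today
  return true;                      // returning true lets the same body drive a static_assert
}

int main(int, char**) {
  test();
#if TEST_STD_VER > 17
  // static_assert(test());         // to be enabled once std::string is constexpr-capable
#endif
  return 0;
}
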
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_view.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_view.pass.cpp
index ddc41f3c3094..19d749535d92 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_view.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/size_size_string_view.pass.cpp
@@ -18,7 +18,7 @@
#include "test_macros.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -28,7 +28,7 @@ int sign(int x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::size_type pos1, typename S::size_type n1,
SV sv, int x)
{
@@ -51,7 +51,7 @@ test(const S& s, typename S::size_type pos1, typename S::size_type n1,
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), 0, 0, SV(""), 0);
test(S(""), 0, 0, SV("abcde"), -5);
@@ -156,7 +156,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("abcde"), 6, 0, SV(""), 0);
test(S("abcde"), 6, 0, SV("abcde"), 0);
@@ -261,7 +261,7 @@ void test1()
}
template <class S, class SV>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("abcdefghijklmnopqrst"), 0, 0, SV(""), 0);
test(S("abcdefghijklmnopqrst"), 0, 0, SV("abcde"), -5);
@@ -361,23 +361,32 @@ void test2()
test(S("abcdefghijklmnopqrst"), 21, 0, SV("abcdefghijklmnopqrst"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
test2<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
test2<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/string.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/string.pass.cpp
index 76237cda8a8c..f73a42188b3f 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/string.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/string.pass.cpp
@@ -16,7 +16,7 @@
#include "test_macros.h"
#include "min_allocator.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -26,16 +26,15 @@ int sign(int x)
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, int x)
{
LIBCPP_ASSERT_NOEXCEPT(s.compare(str));
assert(sign(s.compare(str)) == sign(x));
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), S(""), 0);
test(S(""), S("abcde"), -5);
@@ -53,9 +52,9 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), S("abcde"), 15);
test(S("abcdefghijklmnopqrst"), S("abcdefghij"), 10);
test(S("abcdefghijklmnopqrst"), S("abcdefghijklmnopqrst"), 0);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), S(""), 0);
test(S(""), S("abcde"), -5);
@@ -73,14 +72,24 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), S("abcde"), 15);
test(S("abcdefghijklmnopqrst"), S("abcdefghij"), 10);
test(S("abcdefghijklmnopqrst"), S("abcdefghijklmnopqrst"), 0);
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.compare({"abc", 1}) < 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
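
(The {"abc", 1} cases in the block above, and in the matching find* tests later in the patch, are the LWG 2946 regression checks: that issue reworked the string_view-taking overloads so braced calls like this stay unambiguous. Either spelling of the argument denotes the single character "a", so " !" compares less than it and none of the find variants can locate it. A small standalone illustration of what those asserts rely on; the explicit string/string_view spellings are only for clarity and do not claim which overload the braced form selects.)

#include <cassert>
#include <string>
#include <string_view>

int main() {
  std::string s = " !";
  // Both spellings of the argument denote the one-character sequence "a":
  assert(std::string("abc", 1) == "a");
  assert(std::string_view("abc", 1) == "a");

  assert(s.compare(std::string_view("abc", 1)) < 0);              // ' ' < 'a', so negative
  assert(s.find(std::string_view("abc", 1)) == std::string::npos); // " !" contains no 'a'
  assert(s.find_first_not_of(std::string_view("abc", 1)) == 0);    // ' ' is already not 'a'
  return 0;
}
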
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_compare/string_view.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_compare/string_view.pass.cpp
index fc145f8576c5..af75b6c863a3 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_compare/string_view.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_compare/string_view.pass.cpp
@@ -16,7 +16,7 @@
#include "test_macros.h"
#include "min_allocator.h"
-int sign(int x)
+TEST_CONSTEXPR_CXX20 int sign(int x)
{
if (x == 0)
return 0;
@@ -26,16 +26,15 @@ int sign(int x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, int x)
{
LIBCPP_ASSERT_NOEXCEPT(s.compare(sv));
assert(sign(s.compare(sv)) == sign(x));
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test(S(""), SV(""), 0);
@@ -54,9 +53,9 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), SV("abcde"), 15);
test(S("abcdefghijklmnopqrst"), SV("abcdefghij"), 10);
test(S("abcdefghijklmnopqrst"), SV("abcdefghijklmnopqrst"), 0);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test(S(""), SV(""), 0);
@@ -75,7 +74,17 @@ int main(int, char**)
test(S("abcdefghijklmnopqrst"), SV("abcde"), 15);
test(S("abcdefghijklmnopqrst"), SV("abcdefghij"), 10);
test(S("abcdefghijklmnopqrst"), SV("abcdefghijklmnopqrst"), 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/char_size.pass.cpp
index bf688efbfe3b..996acf377497 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_not_of(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(x < s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'q', 0, S::npos);
test(S(""), 'q', 1, S::npos);
@@ -68,9 +67,9 @@ int main(int, char**)
test(S("csope"), 'q', 0);
test(S("gfsmthlkon"), 'q', 0);
test(S("laenfsbridchgotmkqpj"), 'q', 0);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'q', 0, S::npos);
test(S(""), 'q', 1, S::npos);
@@ -99,7 +98,17 @@ int main(int, char**)
test(S("csope"), 'q', 0);
test(S("gfsmthlkon"), 'q', 0);
test(S("laenfsbridchgotmkqpj"), 'q', 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size.pass.cpp
index 37ae43e77a37..97a4d500811f 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
assert(s.find_first_not_of(str) == x);
@@ -37,7 +37,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, S::npos);
test(S(""), "laenf", 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", S::npos);
test(S(""), "laenf", S::npos);
@@ -142,19 +142,28 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), "htaobedqikfplcgjsmrn", S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size_size.pass.cpp
index d4776a0c56cf..0d6224f3ba63 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/pointer_size_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type n, typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, S::npos);
test(S(""), "irkhs", 0, 0, S::npos);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("gbhqo"), "skqne", 5, 4, S::npos);
test(S("ktdor"), "kipnf", 5, 5, S::npos);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("sdpcilonqj"), "dnjfsagktr", 10, 5, S::npos);
test(S("gtfbdkqeml"), "nejaktmiqg", 10, 9, S::npos);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("pboqganrhedjmltsicfk"), "gbkhdnpoietfcmrslajq", 20, 1, S::npos);
test(S("klchabsimetjnqgorfpd"), "rtfnmbsglkjaichoqedp", 20, 10, S::npos);
@@ -367,23 +367,32 @@ void test3()
test(S("hnbrcplsjfgiktoedmaq"), "qprlsfojamgndekthibc", 21, 20, S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_size.pass.cpp
index 554b4a687d57..6101dce71f47 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_not_of(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_not_of(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, S::npos);
test(S(""), S("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), S::npos);
test(S(""), S("laenf"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), S("htaobedqikfplcgjsmrn"), S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.find_first_not_of({"abc", 1}) == 0);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_view_size.pass.cpp
index 51cef0fea4b0..01ce8bc3a134 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.not.of/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_not_of(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_not_of(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, S::npos);
test(S(""), SV("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), S::npos);
test(S(""), SV("laenf"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), SV("htaobedqikfplcgjsmrn"), S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/char_size.pass.cpp
index 7ba1c618de64..b3884ac42362 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(x < s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'e', 0, S::npos);
test(S(""), 'e', 1, S::npos);
@@ -66,9 +65,9 @@ int main(int, char**)
test(S("csope"), 'e', 4);
test(S("gfsmthlkon"), 'e', S::npos);
test(S("laenfsbridchgotmkqpj"), 'e', 2);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'e', 0, S::npos);
test(S(""), 'e', 1, S::npos);
@@ -95,7 +94,17 @@ int main(int, char**)
test(S("csope"), 'e', 4);
test(S("gfsmthlkon"), 'e', S::npos);
test(S("laenfsbridchgotmkqpj"), 'e', 2);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size.pass.cpp
index a54f4801a5b9..27952f1ded78 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(str));
@@ -38,7 +38,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, S::npos);
test(S(""), "laenf", 0, S::npos);
@@ -123,7 +123,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", S::npos);
test(S(""), "laenf", S::npos);
@@ -143,19 +143,28 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), "htaobedqikfplcgjsmrn", 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size_size.pass.cpp
index e95066aeb2c7..c815160df440 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/pointer_size_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type n, typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, S::npos);
test(S(""), "irkhs", 0, 0, S::npos);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("gbhqo"), "skqne", 5, 4, S::npos);
test(S("ktdor"), "kipnf", 5, 5, S::npos);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("sdpcilonqj"), "dnjfsagktr", 10, 5, S::npos);
test(S("gtfbdkqeml"), "nejaktmiqg", 10, 9, S::npos);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("pboqganrhedjmltsicfk"), "gbkhdnpoietfcmrslajq", 20, 1, S::npos);
test(S("klchabsimetjnqgorfpd"), "rtfnmbsglkjaichoqedp", 20, 10, S::npos);
@@ -367,23 +367,32 @@ void test3()
test(S("hnbrcplsjfgiktoedmaq"), "qprlsfojamgndekthibc", 21, 20, S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_size.pass.cpp
index d30254adb3ed..2f065d5f3797 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, S::npos);
test(S(""), S("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), S::npos);
test(S(""), S("laenf"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), S("htaobedqikfplcgjsmrn"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.find_first_of({"abc", 1}) == std::string::npos);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_view_size.pass.cpp
index 7570c33465d4..762c11302f72 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.first.of/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_first_of(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, S::npos);
test(S(""), SV("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), S::npos);
test(S(""), SV("laenf"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), SV("htaobedqikfplcgjsmrn"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/char_size.pass.cpp
index 25120bb17a4b..e919f29c8611 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(x < s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'i', 0, S::npos);
test(S(""), 'i', 1, S::npos);
@@ -66,9 +65,9 @@ int main(int, char**)
test(S("csope"), 'i', 4);
test(S("gfsmthlkon"), 'i', 9);
test(S("laenfsbridchgotmkqpj"), 'i', 19);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'i', 0, S::npos);
test(S(""), 'i', 1, S::npos);
@@ -95,7 +94,17 @@ int main(int, char**)
test(S("csope"), 'i', 4);
test(S("gfsmthlkon"), 'i', 9);
test(S("laenfsbridchgotmkqpj"), 'i', 19);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size.pass.cpp
index 56c0ee74ebc2..2d34815e5fcc 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(str));
@@ -38,7 +38,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, S::npos);
test(S(""), "laenf", 0, S::npos);
@@ -123,7 +123,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", S::npos);
test(S(""), "laenf", S::npos);
@@ -143,19 +143,28 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), "htaobedqikfplcgjsmrn", S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size_size.pass.cpp
index 0b4eb9698cc8..d0bfd8610c23 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/pointer_size_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type n, typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, S::npos);
test(S(""), "irkhs", 0, 0, S::npos);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("gbhqo"), "skqne", 5, 4, 4);
test(S("ktdor"), "kipnf", 5, 5, 4);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("sdpcilonqj"), "dnjfsagktr", 10, 5, 8);
test(S("gtfbdkqeml"), "nejaktmiqg", 10, 9, 9);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("pboqganrhedjmltsicfk"), "gbkhdnpoietfcmrslajq", 20, 1, 19);
test(S("klchabsimetjnqgorfpd"), "rtfnmbsglkjaichoqedp", 20, 10, 19);
@@ -367,23 +367,32 @@ void test3()
test(S("hnbrcplsjfgiktoedmaq"), "qprlsfojamgndekthibc", 21, 20, S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_size.pass.cpp
index 1c2bd125c91c..25e20ff0340f 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, S::npos);
test(S(""), S("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), S::npos);
test(S(""), S("laenf"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), S("htaobedqikfplcgjsmrn"), S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.find_last_not_of({"abc", 1}) == s.size() - 1);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_view_size.pass.cpp
index 9b7d8d0210ae..4b274fe0a8a8 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.not.of/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_not_of(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, S::npos);
test(S(""), SV("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), S::npos);
test(S(""), SV("laenf"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), SV("htaobedqikfplcgjsmrn"), S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
// typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
// typedef std::string_view SV;
// test0<S, SV>();
// test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/char_size.pass.cpp
index 3273ecd35104..0445d17c2a14 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(x < s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'm', 0, S::npos);
test(S(""), 'm', 1, S::npos);
@@ -66,9 +65,9 @@ int main(int, char**)
test(S("csope"), 'm', S::npos);
test(S("gfsmthlkon"), 'm', 3);
test(S("laenfsbridchgotmkqpj"), 'm', 15);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'm', 0, S::npos);
test(S(""), 'm', 1, S::npos);
@@ -95,7 +94,17 @@ int main(int, char**)
test(S("csope"), 'm', S::npos);
test(S("gfsmthlkon"), 'm', 3);
test(S("laenfsbridchgotmkqpj"), 'm', 15);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size.pass.cpp
index a86c104d9562..5b288c429ea9 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(str));
@@ -38,7 +38,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, S::npos);
test(S(""), "laenf", 0, S::npos);
@@ -123,7 +123,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", S::npos);
test(S(""), "laenf", S::npos);
@@ -143,19 +143,28 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), "htaobedqikfplcgjsmrn", 19);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size_size.pass.cpp
index c3420d5cfaa8..8ff4556e5383 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/pointer_size_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type n, typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, S::npos);
test(S(""), "irkhs", 0, 0, S::npos);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("gbhqo"), "skqne", 5, 4, 3);
test(S("ktdor"), "kipnf", 5, 5, 0);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("sdpcilonqj"), "dnjfsagktr", 10, 5, 9);
test(S("gtfbdkqeml"), "nejaktmiqg", 10, 9, 8);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("pboqganrhedjmltsicfk"), "gbkhdnpoietfcmrslajq", 20, 1, 4);
test(S("klchabsimetjnqgorfpd"), "rtfnmbsglkjaichoqedp", 20, 10, 17);
@@ -367,23 +367,32 @@ void test3()
test(S("hnbrcplsjfgiktoedmaq"), "qprlsfojamgndekthibc", 21, 20, 19);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_size.pass.cpp
index a6e8d1a69c0a..f65f9079a5df 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, S::npos);
test(S(""), S("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), S::npos);
test(S(""), S("laenf"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), S("htaobedqikfplcgjsmrn"), 19);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.find_last_of({"abc", 1}) == std::string::npos);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_view_size.pass.cpp
index a44ddd9bd159..1e0af418e806 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find.last.of/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find_last_of(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, S::npos);
test(S(""), SV("laenf"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), S::npos);
test(S(""), SV("laenf"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("pniotcfrhqsmgdkjbael"), SV("htaobedqikfplcgjsmrn"), 19);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/char_size.pass.cpp
index bbd846142abd..e6f9e39b28a9 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(0 <= x && x + 1 <= s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'c', 0, S::npos);
test(S(""), 'c', 1, S::npos);
@@ -66,9 +65,9 @@ int main(int, char**)
test(S("abcde"), 'c', 2);
test(S("abcdeabcde"), 'c', 2);
test(S("abcdeabcdeabcdeabcde"), 'c', 2);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'c', 0, S::npos);
test(S(""), 'c', 1, S::npos);
@@ -95,7 +94,17 @@ int main(int, char**)
test(S("abcde"), 'c', 2);
test(S("abcdeabcde"), 'c', 2);
test(S("abcdeabcdeabcdeabcde"), 'c', 2);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size.pass.cpp
index c06797a24588..cff4d75b2c9c 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -31,7 +31,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(str));
@@ -44,7 +44,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0);
test(S(""), "abcde", 0, S::npos);
@@ -129,7 +129,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", 0);
test(S(""), "abcde", S::npos);
@@ -149,19 +149,28 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size_size.pass.cpp
index 3ca6b3a7d5f8..3ce531de52c9 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/pointer_size_size.pass.cpp
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, 0);
test(S(""), "abcde", 0, 0, 0);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("abcde"), "abcde", 5, 4, S::npos);
test(S("abcde"), "abcde", 5, 5, S::npos);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("abcdeabcde"), "abcdeabcde", 10, 5, S::npos);
test(S("abcdeabcde"), "abcdeabcde", 10, 9, S::npos);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 20, 1, S::npos);
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 20, 10, S::npos);
@@ -367,23 +367,32 @@ void test3()
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 21, 20, S::npos);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp
index 3aef261a01fe..edc2adb1e3aa 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, 0);
test(S(""), S("abcde"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), 0);
test(S(""), S("abcde"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.find({"abc", 1}) == std::string::npos);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_view_size.pass.cpp
index c74b5a44a87c..2402e70e44ab 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_find/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_find/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.find(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, 0);
test(S(""), SV("abcde"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), 0);
test(S(""), SV("abcde"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), SV("abcdeabcdeabcdeabcde"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/char_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/char_size.pass.cpp
index 49a5fbc5f86d..77f4cbc4514a 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/char_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/char_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type pos,
typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, typename S::value_type c, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::value_type c, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(c));
@@ -37,9 +37,8 @@ test(const S& s, typename S::value_type c, typename S::size_type x)
assert(x + 1 <= s.size());
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test(S(""), 'b', 0, S::npos);
test(S(""), 'b', 1, S::npos);
@@ -66,9 +65,9 @@ int main(int, char**)
test(S("abcde"), 'b', 1);
test(S("abcdeabcde"), 'b', 6);
test(S("abcdeabcdeabcdeabcde"), 'b', 16);
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test(S(""), 'b', 0, S::npos);
test(S(""), 'b', 1, S::npos);
@@ -95,7 +94,17 @@ int main(int, char**)
test(S("abcde"), 'b', 1);
test(S("abcdeabcde"), 'b', 6);
test(S("abcdeabcdeabcdeabcde"), 'b', 16);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size.pass.cpp
index 400616140598..c05844fcedd4 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type x)
{
@@ -31,7 +31,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(str));
@@ -45,7 +45,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0);
test(S(""), "abcde", 0, S::npos);
@@ -130,7 +130,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), "", 0);
test(S(""), "abcde", S::npos);
@@ -150,19 +150,28 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size_size.pass.cpp
index 2bcb486d9952..339708979d09 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/pointer_size_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const typename S::value_type* str, typename S::size_type pos,
typename S::size_type n, typename S::size_type x)
{
@@ -28,7 +28,7 @@ test(const S& s, const typename S::value_type* str, typename S::size_type pos,
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), "", 0, 0, 0);
test(S(""), "abcde", 0, 0, 0);
@@ -133,7 +133,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S("abcde"), "abcde", 5, 4, 0);
test(S("abcde"), "abcde", 5, 5, 0);
@@ -238,7 +238,7 @@ void test1()
}
template <class S>
-void test2()
+TEST_CONSTEXPR_CXX20 void test2()
{
test(S("abcdeabcde"), "abcdeabcde", 10, 5, 5);
test(S("abcdeabcde"), "abcdeabcde", 10, 9, 0);
@@ -343,7 +343,7 @@ void test2()
}
template <class S>
-void test3()
+TEST_CONSTEXPR_CXX20 void test3()
{
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 20, 1, 15);
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 20, 10, 10);
@@ -367,23 +367,32 @@ void test3()
test(S("abcdeabcdeabcdeabcde"), "abcdeabcdeabcdeabcde", 21, 20, 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
test2<S>();
test3<S>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_size.pass.cpp
index a1932c7bcf62..62a87562bd12 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(str, pos));
@@ -27,7 +27,7 @@ test(const S& s, const S& str, typename S::size_type pos, typename S::size_type
}
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, const S& str, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(str));
@@ -37,7 +37,7 @@ test(const S& s, const S& str, typename S::size_type x)
}
template <class S>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), S(""), 0, 0);
test(S(""), S("abcde"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), S(""), 0);
test(S(""), S("abcde"), S::npos);
@@ -142,26 +142,35 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), S("abcdeabcdeabcdeabcde"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
test0<S>();
test1<S>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
test0<S>();
test1<S>();
- }
+ }
#endif
#if TEST_STD_VER > 3
- { // LWG 2946
+ { // LWG 2946
std::string s = " !";
assert(s.rfind({"abc", 1}) == std::string::npos);
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_view_size.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_view_size.pass.cpp
index ed9d87596532..541f8fbc73f2 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_view_size.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_rfind/string_view_size.pass.cpp
@@ -17,7 +17,7 @@
#include "min_allocator.h"
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(sv, pos));
@@ -27,7 +27,7 @@ test(const S& s, SV sv, typename S::size_type pos, typename S::size_type x)
}
template <class S, class SV>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, SV sv, typename S::size_type x)
{
LIBCPP_ASSERT_NOEXCEPT(s.rfind(sv));
@@ -37,7 +37,7 @@ test(const S& s, SV sv, typename S::size_type x)
}
template <class S, class SV>
-void test0()
+TEST_CONSTEXPR_CXX20 void test0()
{
test(S(""), SV(""), 0, 0);
test(S(""), SV("abcde"), 0, S::npos);
@@ -122,7 +122,7 @@ void test0()
}
template <class S, class SV>
-void test1()
+TEST_CONSTEXPR_CXX20 void test1()
{
test(S(""), SV(""), 0);
test(S(""), SV("abcde"), S::npos);
@@ -142,21 +142,30 @@ void test1()
test(S("abcdeabcdeabcdeabcde"), SV("abcdeabcdeabcdeabcde"), 0);
}
-int main(int, char**)
-{
- {
+bool test() {
+ {
typedef std::string S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
#if TEST_STD_VER >= 11
- {
+ {
typedef std::basic_string<char, std::char_traits<char>, min_allocator<char>> S;
typedef std::string_view SV;
test0<S, SV>();
test1<S, SV>();
- }
+ }
+#endif
+
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
#endif
return 0;
diff --git a/libcxx/test/std/strings/basic.string/string.ops/string_substr/substr.pass.cpp b/libcxx/test/std/strings/basic.string/string.ops/string_substr/substr.pass.cpp
index 27af66e0670b..00d29c0de0e3 100644
--- a/libcxx/test/std/strings/basic.string/string.ops/string_substr/substr.pass.cpp
+++ b/libcxx/test/std/strings/basic.string/string.ops/string_substr/substr.pass.cpp
@@ -19,7 +19,7 @@
#include "min_allocator.h"
template <class S>
-void
+TEST_CONSTEXPR_CXX20 void
test(const S& s, typename S::size_type pos, typename S::size_type n)
{
if (pos <= s.size())
@@ -47,8 +47,7 @@ test(const S& s, typename S::size_type pos, typename S::size_type n)
#endif
}
-int main(int, char**)
-{
+bool test() {
{
typedef std::string S;
test(S(""), 0, 0);
@@ -174,5 +173,15 @@ int main(int, char**)
}
#endif
+ return true;
+}
+
+int main(int, char**)
+{
+ test();
+#if TEST_STD_VER > 17
+ // static_assert(test());
+#endif
+
return 0;
}
diff --git a/libcxx/test/std/strings/string.view/trivially_copyable.compile.pass.cpp b/libcxx/test/std/strings/string.view/trivially_copyable.compile.pass.cpp
index 6a22864b23f1..e8072585a660 100644
--- a/libcxx/test/std/strings/string.view/trivially_copyable.compile.pass.cpp
+++ b/libcxx/test/std/strings/string.view/trivially_copyable.compile.pass.cpp
@@ -19,7 +19,7 @@ static_assert(std::is_trivially_copyable<std::basic_string_view<char> >::value,
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
static_assert(std::is_trivially_copyable<std::basic_string_view<wchar_t> >::value, "");
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
static_assert(std::is_trivially_copyable<std::basic_string_view<char8_t> >::value, "");
#endif
static_assert(std::is_trivially_copyable<std::basic_string_view<char16_t> >::value, "");
diff --git a/libcxx/test/std/utilities/format/format.arguments/format.arg/ctor.pass.cpp b/libcxx/test/std/utilities/format/format.arguments/format.arg/ctor.pass.cpp
index 56fb05f1ad23..a15fe91ff638 100644
--- a/libcxx/test/std/utilities/format/format.arguments/format.arg/ctor.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.arguments/format.arg/ctor.pass.cpp
@@ -34,7 +34,7 @@ void test() {
void test() {
test<char>();
test<wchar_t>();
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test<char8_t>();
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/advance_to.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/advance_to.pass.cpp
index a2afa786e0bb..14b430580060 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/advance_to.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/advance_to.pass.cpp
@@ -52,7 +52,7 @@ void test() {
std::make_format_args<std::basic_format_context<
std::back_insert_iterator<std::basic_string<wchar_t>>, wchar_t>>()));
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(std::basic_format_args(
std::make_format_args<std::basic_format_context<
std::back_insert_iterator<std::basic_string<char8_t>>, char8_t>>()));
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/arg.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/arg.pass.cpp
index 3b4834110e84..db8121b9bec5 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/arg.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/arg.pass.cpp
@@ -34,6 +34,7 @@ void test() {
const std::basic_format_context context =
test_format_context_create(OutIt{output}, args);
LIBCPP_ASSERT(args.__size() == 4);
+ ASSERT_NOEXCEPT(context.arg(0));
for (size_t i = 0, e = args.__size(); i != e; ++i) {
assert(context.arg(i));
}
@@ -51,7 +52,7 @@ int main(int, char**) {
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
test<std::back_insert_iterator<std::basic_string<wchar_t>>, wchar_t>();
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test<std::back_insert_iterator<std::basic_string<char8_t>>, char8_t>();
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/ctor.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/ctor.pass.cpp
index 049672f30f1d..e56d87267064 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/ctor.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/ctor.pass.cpp
@@ -129,7 +129,7 @@ void test() {
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
test<std::back_insert_iterator<std::basic_string<wchar_t>>, wchar_t>();
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test<std::back_insert_iterator<std::basic_string<char8_t>>, char8_t>();
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/locale.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/locale.pass.cpp
index 3286991e69fd..6a85d3de0cdd 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/locale.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/locale.pass.cpp
@@ -85,7 +85,7 @@ void test() {
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
test<std::back_insert_iterator<std::basic_string<wchar_t>>, wchar_t>();
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test<std::back_insert_iterator<std::basic_string<char8_t>>, char8_t>();
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/out.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/out.pass.cpp
index b211dfd3e7fa..506c485d4a5b 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/out.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.context/format.context/out.pass.cpp
@@ -48,7 +48,7 @@ void test() {
std::make_format_args<std::basic_format_context<
std::back_insert_iterator<std::basic_string<wchar_t>>, wchar_t>>()));
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(std::basic_format_args(
std::make_format_args<std::basic_format_context<
std::back_insert_iterator<std::basic_string<char8_t>>, char8_t>>()));
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/types.compile.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/types.compile.pass.cpp
index fcddf832df2c..4db4f017e5ac 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/types.compile.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.formatter.spec/types.compile.pass.cpp
@@ -36,9 +36,6 @@
#include <set>
#include <stack>
#include <span>
-#ifndef _LIBCPP_HAS_NO_THREADS
-# include <thread>
-#endif
#include <tuple>
#include <type_traits>
#include <unordered_map>
@@ -51,6 +48,9 @@
#ifndef TEST_HAS_NO_LOCALIZATION
# include <regex>
#endif
+#ifndef TEST_HAS_NO_THREADS
+# include <thread>
+#endif
// Validate default template argument.
static_assert(std::same_as<std::formatter<int>, std::formatter<int, char>>);
@@ -217,7 +217,7 @@ void test_P1636() {
#ifndef TEST_HAS_NO_LOCALIZATION
assert_formatter_is_disabled<std::sub_match<CharT*>, CharT>();
#endif
-#ifndef _LIBCPP_HAS_NO_THREADS
+#ifndef TEST_HAS_NO_THREADS
assert_formatter_is_disabled<std::thread::id, CharT>();
#endif
assert_formatter_is_disabled<std::unique_ptr<int>, CharT>();
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/advance_to.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/advance_to.pass.cpp
index 88c7cf517189..7bb0d3e26610 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/advance_to.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/advance_to.pass.cpp
@@ -52,7 +52,7 @@ constexpr void test(const CharT* fmt) {
constexpr bool test() {
test("abc");
test(L"abc");
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(u8"abc");
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/begin.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/begin.pass.cpp
index c78c99554317..fd209e611c6b 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/begin.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/begin.pass.cpp
@@ -38,7 +38,7 @@ constexpr void test(const CharT* fmt) {
constexpr bool test() {
test("abc");
test(L"abc");
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(u8"abc");
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/ctor.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/ctor.pass.cpp
index e11d000f0e90..195f07742a01 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/ctor.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/ctor.pass.cpp
@@ -61,7 +61,7 @@ constexpr void test(const CharT* fmt) {
constexpr bool test() {
test("abc");
test(L"abc");
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(u8"abc");
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/end.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/end.pass.cpp
index 9f6c8dc65c6a..9a878ef42ba1 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/end.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/end.pass.cpp
@@ -38,7 +38,7 @@ constexpr void test(const CharT* fmt) {
constexpr bool test() {
test("abc");
test(L"abc");
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test(u8"abc");
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/types.compile.pass.cpp b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/types.compile.pass.cpp
index 412bff7f2c91..bc1398266b6c 100644
--- a/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/types.compile.pass.cpp
+++ b/libcxx/test/std/utilities/format/format.formatter/format.parse.ctx/types.compile.pass.cpp
@@ -50,7 +50,7 @@ constexpr void test() {
#ifndef TEST_HAS_NO_WIDE_CHARACTERS
test<wchar_t>();
#endif
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test<char8_t>();
#endif
#ifndef TEST_HAS_NO_UNICODE_CHARS
diff --git a/libcxx/test/std/utilities/format/format.functions/format_tests.h b/libcxx/test/std/utilities/format/format.functions/format_tests.h
index 4319f5fd1d4b..df9f4f6b454c 100644
--- a/libcxx/test/std/utilities/format/format.functions/format_tests.h
+++ b/libcxx/test/std/utilities/format/format.functions/format_tests.h
@@ -393,54 +393,7 @@ void format_test_bool(TestFunction check, ExceptionTest check_exception) {
// See locale-specific_form.pass.cpp
// *** type ***
- for (const auto& fmt : invalid_types<CharT>("bBcdosxX"))
- check_exception("The format-spec type has a type not supported for a bool argument", fmt, true);
-}
-
-template <class CharT, class TestFunction, class ExceptionTest>
-void format_test_bool_as_char(TestFunction check, ExceptionTest check_exception) {
- // *** align-fill & width ***
- check(SV("answer is '\1 '"), SV("answer is '{:6c}'"), true);
- check(SV("answer is ' \1'"), SV("answer is '{:>6c}'"), true);
- check(SV("answer is '\1 '"), SV("answer is '{:<6c}'"), true);
- check(SV("answer is ' \1 '"), SV("answer is '{:^6c}'"), true);
-
- check(SV("answer is '-----\1'"), SV("answer is '{:->6c}'"), true);
- check(SV("answer is '\1-----'"), SV("answer is '{:-<6c}'"), true);
- check(SV("answer is '--\1---'"), SV("answer is '{:-^6c}'"), true);
-
- check(std::basic_string_view<CharT>(CSTR("answer is '\0 '"), 18), SV("answer is '{:6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is '\0 '"), 18), SV("answer is '{:6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is ' \0'"), 18), SV("answer is '{:>6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is '\0 '"), 18), SV("answer is '{:<6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is ' \0 '"), 18), SV("answer is '{:^6c}'"), false);
-
- check(std::basic_string_view<CharT>(CSTR("answer is '-----\0'"), 18), SV("answer is '{:->6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is '\0-----'"), 18), SV("answer is '{:-<6c}'"), false);
- check(std::basic_string_view<CharT>(CSTR("answer is '--\0---'"), 18), SV("answer is '{:-^6c}'"), false);
-
- // *** Sign ***
- check_exception("A sign field isn't allowed in this format-spec", SV("{:-c}"), true);
- check_exception("A sign field isn't allowed in this format-spec", SV("{:+c}"), true);
- check_exception("A sign field isn't allowed in this format-spec", SV("{: c}"), true);
-
- // *** alternate form ***
- check_exception("An alternate form field isn't allowed in this format-spec", SV("{:#c}"), true);
-
- // *** zero-padding ***
- check_exception("A zero-padding field isn't allowed in this format-spec", SV("{:0c}"), true);
-
- // *** precision ***
- check_exception("The format-spec should consume the input or end with a '}'", SV("{:.c}"), true);
- check_exception("The format-spec should consume the input or end with a '}'", SV("{:.0c}"), true);
- check_exception("The format-spec should consume the input or end with a '}'", SV("{:.42c}"), true);
-
- // *** locale-specific form ***
- // Note it has no effect but it's allowed.
- check(SV("answer is '*'"), SV("answer is '{:Lc}'"), '*');
-
- // *** type ***
- for (const auto& fmt : invalid_types<CharT>("bBcdosxX"))
+ for (const auto& fmt : invalid_types<CharT>("bBdosxX"))
check_exception("The format-spec type has a type not supported for a bool argument", fmt, true);
}
@@ -2550,7 +2503,6 @@ void format_tests(TestFunction check, ExceptionTest check_exception) {
check(SV("hello false true"), SV("hello {} {}"), false, true);
format_test_bool<CharT>(check, check_exception);
- format_test_bool_as_char<CharT>(check, check_exception);
format_test_bool_as_integer<CharT>(check, check_exception);
// *** Test signed integral format argument ***
diff --git a/libcxx/test/std/utilities/utility/utility.intcmp/intcmp.fail.cpp b/libcxx/test/std/utilities/utility/utility.intcmp/intcmp.fail.cpp
index e822e7321087..a399cc7c1aee 100644
--- a/libcxx/test/std/utilities/utility/utility.intcmp/intcmp.fail.cpp
+++ b/libcxx/test/std/utilities/utility/utility.intcmp/intcmp.fail.cpp
@@ -72,7 +72,7 @@ constexpr void test() {
std::in_range<T>(int()); // expected-error 10-11 {{no matching function for call to 'in_range'}}
std::in_range<int>(T()); // expected-error 10-11 {{no matching function for call to 'in_range'}}
}
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
template <class T>
constexpr void test_char8t() {
std::cmp_equal(T(), T()); // expected-error 1 {{no matching function for call to 'cmp_equal'}}
@@ -96,7 +96,7 @@ constexpr void test_char8t() {
std::in_range<T>(int()); // expected-error 1 {{no matching function for call to 'in_range'}}
std::in_range<int>(T()); // expected-error 1 {{no matching function for call to 'in_range'}}
}
-#endif // _LIBCPP_HAS_NO_CHAR8_T
+#endif // TEST_HAS_NO_CHAR8_T
#ifndef TEST_HAS_NO_UNICODE_CHARS
template <class T>
@@ -139,9 +139,9 @@ int main(int, char**) {
test<std::nullptr_t>();
test<EmptyT>();
-#ifndef _LIBCPP_HAS_NO_CHAR8_T
+#ifndef TEST_HAS_NO_CHAR8_T
test_char8t<char8_t>();
-#endif // _LIBCPP_HAS_NO_CHAR8_T
+#endif // TEST_HAS_NO_CHAR8_T
#ifndef TEST_HAS_NO_UNICODE_CHARS
test_uchars<char16_t>();
diff --git a/libcxx/test/support/test_macros.h b/libcxx/test/support/test_macros.h
index c3794ab4685d..b5d768bff484 100644
--- a/libcxx/test/support/test_macros.h
+++ b/libcxx/test/support/test_macros.h
@@ -380,6 +380,14 @@ inline void DoNotOptimize(Tp const& value) {
# define TEST_HAS_NO_LOCALIZATION
#endif
+#if TEST_STD_VER <= 17 || !defined(__cpp_char8_t)
+# define TEST_HAS_NO_CHAR8_T
+#endif
+
+#if defined(_LIBCPP_HAS_NO_THREADS)
+# define TEST_HAS_NO_THREADS
+#endif
+
#if defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
diff --git a/libcxx/utils/generate_private_header_tests.py b/libcxx/utils/generate_private_header_tests.py
index 81051a9f5ea7..e6eecd68ae50 100755
--- a/libcxx/utils/generate_private_header_tests.py
+++ b/libcxx/utils/generate_private_header_tests.py
@@ -53,7 +53,7 @@ def is_still_public(path):
rp = relative_path(path)
return not rp.startswith('__support') and rp not in [
"__bsd_locale_defaults.h", "__bsd_locale_fallbacks.h", "__config",
- "__config_site.in", "__debug", "__hash_table", "__functional_base",
+ "__config_site.in", "__debug", "__hash_table",
"__libcpp_version", "__threading_support", "__tree", "__undef_macros"
]
diff --git a/libcxx/utils/libcxx/test/params.py b/libcxx/utils/libcxx/test/params.py
index be4d892f741d..211cd189df4a 100644
--- a/libcxx/utils/libcxx/test/params.py
+++ b/libcxx/utils/libcxx/test/params.py
@@ -76,7 +76,6 @@ DEFAULT_PARAMETERS = [
actions=lambda modules: [
AddFeature('modules-build'),
AddCompileFlag('-fmodules'),
- AddCompileFlag('-Xclang -fmodules-local-submodule-visibility'),
] if modules else []),
Parameter(name='enable_exceptions', choices=[True, False], type=bool, default=True,
diff --git a/lld/MachO/Driver.cpp b/lld/MachO/Driver.cpp
index 636c4016dd6c..790f31a44a3b 100644
--- a/lld/MachO/Driver.cpp
+++ b/lld/MachO/Driver.cpp
@@ -505,14 +505,6 @@ static void initLLVM() {
}
static void compileBitcodeFiles() {
- // FIXME: Remove this once LTO.cpp honors config->exportDynamic.
- if (config->exportDynamic)
- for (InputFile *file : inputFiles)
- if (isa<BitcodeFile>(file)) {
- warn("the effect of -export_dynamic on LTO is not yet implemented");
- break;
- }
-
TimeTraceScope timeScope("LTO");
auto *lto = make<BitcodeCompiler>();
for (InputFile *file : inputFiles)
diff --git a/lld/MachO/LTO.cpp b/lld/MachO/LTO.cpp
index fd49a09229d1..c2863c78744c 100644
--- a/lld/MachO/LTO.cpp
+++ b/lld/MachO/LTO.cpp
@@ -64,6 +64,8 @@ void BitcodeCompiler::add(BitcodeFile &f) {
resols.reserve(objSyms.size());
// Provide a resolution to the LTO API for each symbol.
+ bool exportDynamic =
+ config->outputType != MH_EXECUTE || config->exportDynamic;
auto symIt = f.symbols.begin();
for (const lto::InputFile::Symbol &objSym : objSyms) {
resols.emplace_back();
@@ -77,12 +79,14 @@ void BitcodeCompiler::add(BitcodeFile &f) {
// be removed.
r.Prevailing = !objSym.isUndefined() && sym->getFile() == &f;
- // FIXME: What about other output types? And we can probably be less
- // restrictive with -flat_namespace, but it's an infrequent use case.
- // FIXME: Honor config->exportDynamic.
- r.VisibleToRegularObj = config->outputType != MH_EXECUTE ||
- config->namespaceKind == NamespaceKind::flat ||
- sym->isUsedInRegularObj;
+ if (const auto *defined = dyn_cast<Defined>(sym))
+ r.ExportDynamic =
+ defined->isExternal() && !defined->privateExtern && exportDynamic;
+ else if (const auto *common = dyn_cast<CommonSymbol>(sym))
+ r.ExportDynamic = !common->privateExtern && exportDynamic;
+
+ r.VisibleToRegularObj =
+ sym->isUsedInRegularObj || (r.Prevailing && r.ExportDynamic);
// Un-define the symbol so that we don't get duplicate symbol errors when we
// load the ObjFile emitted by LTO compilation.
diff --git a/lld/MachO/MapFile.cpp b/lld/MachO/MapFile.cpp
index 8f9381ff0d79..a4a0065c2816 100644
--- a/lld/MachO/MapFile.cpp
+++ b/lld/MachO/MapFile.cpp
@@ -31,6 +31,7 @@
#include "OutputSection.h"
#include "OutputSegment.h"
#include "Symbols.h"
+#include "SyntheticSections.h"
#include "Target.h"
#include "llvm/Support/Parallel.h"
#include "llvm/Support/TimeProfiler.h"
@@ -76,7 +77,27 @@ getSymbolStrings(ArrayRef<Defined *> syms) {
std::vector<std::string> str(syms.size());
parallelForEachN(0, syms.size(), [&](size_t i) {
raw_string_ostream os(str[i]);
- os << toString(*syms[i]);
+ Defined *sym = syms[i];
+
+ switch (sym->isec->kind()) {
+ case InputSection::CStringLiteralKind: {
+ // Output "literal string: <string literal>"
+ const auto *isec = cast<CStringInputSection>(sym->isec);
+ const StringPiece &piece = isec->getStringPiece(sym->value);
+ assert(
+ sym->value == piece.inSecOff &&
+ "We expect symbols to always point to the start of a StringPiece.");
+ StringRef str = isec->getStringRef(&piece - &(*isec->pieces.begin()));
+ assert(str.back() == '\000');
+ (os << "literal string: ")
+ // Remove null sequence at the end
+ .write_escaped(str.substr(0, str.size() - 1));
+ break;
+ }
+ case InputSection::ConcatKind:
+ case InputSection::WordLiteralKind:
+ os << toString(*sym);
+ }
});
DenseMap<Symbol *, std::string> ret;
diff --git a/lld/test/MachO/lto-internalize.ll b/lld/test/MachO/lto-internalize.ll
index c9bac63b730b..755484c216ff 100644
--- a/lld/test/MachO/lto-internalize.ll
+++ b/lld/test/MachO/lto-internalize.ll
@@ -12,6 +12,9 @@
; RUN: llvm-dis < %t/test.0.2.internalize.bc | FileCheck %s
; RUN: llvm-objdump --macho --syms %t/test | FileCheck %s --check-prefix=SYMTAB
+; CHECK: @comm = internal global
+; CHECK: @comm_hide = internal global
+
;; Check that main is not internalized. This covers the case of bitcode symbols
;; referenced by undefined symbols that don't belong to any InputFile.
; CHECK: define void @main()
@@ -28,18 +31,48 @@
;; internalized.
; CHECK: define internal void @baz()
-; Check foo and bar are not emitted to the .symtab
+;; Check that none of the internalized symbols are emitted to the symtab
; SYMTAB-LABEL: SYMBOL TABLE:
-; SYMTAB-NEXT: g F __TEXT,__text _main
-; SYMTAB-NEXT: g F __TEXT,__text _used_in_regular_obj
-; SYMTAB-NEXT: g F __TEXT,__text __mh_execute_header
-; SYMTAB-NEXT: *UND* dyld_stub_binder
+; SYMTAB-DAG: g F __TEXT,__text _main
+; SYMTAB-DAG: g F __TEXT,__text _used_in_regular_obj
+; SYMTAB-DAG: g F __TEXT,__text __mh_execute_header
+; SYMTAB-DAG: *UND* dyld_stub_binder
; SYMTAB-EMPTY:
+; RUN: %lld -lSystem -dylib %t/test.o %t/baz.o %t/regular.o -o %t/test.dylib -save-temps
+; RUN: llvm-dis < %t/test.dylib.0.2.internalize.bc | FileCheck %s --check-prefix=DYN
+; RUN: llvm-nm -m %t/test.dylib | FileCheck %s --check-prefix=DYN-SYMS \
+; RUN: --implicit-check-not _foo
+
+; RUN: %lld -lSystem -export_dynamic %t/test.o %t/baz.o %t/regular.o -o %t/test.extdyn -save-temps
+; RUN: llvm-dis < %t/test.extdyn.0.2.internalize.bc
+; RUN: llvm-nm -m %t/test.extdyn | FileCheck %s --check-prefix=DYN-SYMS \
+; RUN: --implicit-check-not _foo
+
+;; Note that only foo() gets internalized here; everything else that isn't
+;; hidden must be exported.
+; DYN: @comm = common global
+; DYN: @comm_hide = internal global
+; DYN: define void @main()
+; DYN: define void @bar()
+; DYN: define internal void @foo()
+; DYN: define void @used_in_regular_obj()
+; DYN: define void @baz()
+
+; DYN-SYMS-DAG: (__TEXT,__text) external _bar
+; DYN-SYMS-DAG: (__TEXT,__text) external _baz
+; DYN-SYMS-DAG: (__DATA,__common) external _comm
+; DYN-SYMS-DAG: (__TEXT,__text) external _main
+; DYN-SYMS-DAG: (__TEXT,__text) external _used_in_regular_obj
+
;--- test.s
target triple = "x86_64-apple-macosx10.15.0"
target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+;; Common symbols are always external.
+@comm = common global i8 0, align 1
+@comm_hide = common hidden global i8 0, align 1
+
declare void @baz()
define void @main() {
diff --git a/lld/test/MachO/map-file.s b/lld/test/MachO/map-file.s
index 85c23e763e9e..11ad7f0079cc 100644
--- a/lld/test/MachO/map-file.s
+++ b/lld/test/MachO/map-file.s
@@ -2,6 +2,7 @@
# RUN: rm -rf %t; split-file %s %t
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %t/foo.s -o %t/foo.o
# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %t/test.s -o %t/test.o
+# RUN: llvm-mc -filetype=obj -triple=x86_64-apple-darwin %t/c-string-literal.s -o %t/c-string-literal.o
# RUN: %lld -map %t/map %t/test.o %t/foo.o --time-trace -o %t/test-map
# RUN: llvm-objdump --syms --section-headers %t/test-map > %t/objdump
@@ -9,17 +10,6 @@
# RUN: FileCheck %s < %t/out
# RUN: FileCheck %s --check-prefix=MAPFILE < %t/test-map.time-trace
-#--- foo.s
-.section __TEXT,obj
-.globl _foo
-_foo:
-
-#--- test.s
-.comm _number, 1
-.globl _main
-_main:
- ret
-
# CHECK: Sections:
# CHECK-NEXT: Idx Name Size VMA Type
# CHECK-NEXT: 0 __text {{[0-9a-f]+}} [[#%x,TEXT:]] TEXT
@@ -51,4 +41,53 @@ _main:
# CHECK-NEXT: 0x[[#FOO]] [ 2] _foo
# CHECK-NEXT: 0x[[#NUMBER]] [ 1] _number
+# RUN: %lld -map %t/c-string-literal-map %t/c-string-literal.o -o %t/c-string-literal-out
+# RUN: FileCheck --check-prefix=CSTRING %s < %t/c-string-literal-map
+
+## C-string literals should be printed as "literal string: <C string literal>"
+# CSTRING-LABEL: Symbols:
+# CSTRING-DAG: _main
+# CSTRING-DAG: literal string: Hello world!\n
+# CSTRING-DAG: literal string: Hello, it's me
+
+# RUN: %lld -dead_strip -map %t/dead-c-string-literal-map %t/c-string-literal.o -o %t/dead-c-string-literal-out
+# RUN: FileCheck --check-prefix=DEADCSTRING %s < %t/dead-c-string-literal-map
+
+## C-string literals should be printed as "literal string: <C string literal>"
+# DEADCSTRING-LABEL: Symbols:
+# DEADCSTRING-DAG: _main
+# DEADCSTRING-DAG: literal string: Hello world!\n
+# DEADCSTRING-LABEL: Dead Stripped Symbols:
+# DEADCSTRING-DAG: literal string: Hello, it's me
+
# MAPFILE: "name":"Total Write map file"
+
+#--- foo.s
+.section __TEXT,obj
+.globl _foo
+_foo:
+
+#--- test.s
+.comm _number, 1
+.globl _main
+_main:
+ ret
+
+#--- c-string-literal.s
+.section __TEXT,__cstring
+.globl _hello_world, _hello_its_me, _main
+
+_hello_world:
+.asciz "Hello world!\n"
+
+_hello_its_me:
+.asciz "Hello, it's me"
+
+.text
+_main:
+ movl $0x2000004, %eax # write() syscall
+ mov $1, %rdi # stdout
+ leaq _hello_world(%rip), %rsi
+ mov $13, %rdx # length of str
+ syscall
+ ret
diff --git a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
index 10137e714861..131d1932fe14 100644
--- a/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
+++ b/lldb/source/Plugins/ObjectFile/Mach-O/ObjectFileMachO.cpp
@@ -2711,7 +2711,7 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
if (process_shared_cache_uuid.IsValid() &&
process_shared_cache_uuid != UUID::fromOptionalData(&cache_uuid, 16))
return;
-
+ const bool pinned = dyld_shared_cache_pin_mapping(shared_cache);
dyld_shared_cache_for_each_image(shared_cache, ^(dyld_image_t image) {
uuid_t dsc_image_uuid;
if (found_image)
@@ -2768,6 +2768,8 @@ void ObjectFileMachO::ParseSymtab(Symtab &symtab) {
nlist_count = nlistCount;
});
});
+ if (pinned)
+ dyld_shared_cache_unpin_mapping(shared_cache);
});
if (nlist_buffer) {
DataExtractor dsc_local_symbols_data(nlist_buffer,
diff --git a/llvm/cmake/config-ix.cmake b/llvm/cmake/config-ix.cmake
index a138d372d3b2..c70b8b3787a0 100644
--- a/llvm/cmake/config-ix.cmake
+++ b/llvm/cmake/config-ix.cmake
@@ -650,7 +650,12 @@ else()
find_ocamlfind_package(ctypes VERSION 0.4 OPTIONAL)
if( HAVE_OCAML_CTYPES )
message(STATUS "OCaml bindings enabled.")
- find_ocamlfind_package(oUnit VERSION 2 OPTIONAL)
+ find_ocamlfind_package(ounit2 OPTIONAL)
+ if ( HAVE_OCAML_OUNIT2 )
+ set(HAVE_OCAML_OUNIT TRUE)
+ else()
+ find_ocamlfind_package(oUnit VERSION 2 OPTIONAL)
+ endif()
set(LLVM_BINDINGS "${LLVM_BINDINGS} ocaml")
set(LLVM_OCAML_INSTALL_PATH "${OCAML_STDLIB_PATH}" CACHE STRING
diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst
index 9b819dcd9419..6b44b7e7355c 100644
--- a/llvm/docs/LangRef.rst
+++ b/llvm/docs/LangRef.rst
@@ -24049,3 +24049,43 @@ Semantics:
The '``llvm.preserve.struct.access.index``' intrinsic produces the same result
as a getelementptr with base ``base`` and access operands ``{0, gep_index}``.
+
+'``llvm.fptrunc.round``' Intrinsic
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Syntax:
+"""""""
+
+::
+
+ declare <ty2>
+ @llvm.fptrunc.round(<type> <value>, metadata <rounding mode>)
+
+Overview:
+"""""""""
+
+The '``llvm.fptrunc.round``' intrinsic truncates
+:ref:`floating-point <t_floating>` ``value`` to type ``ty2``
+with a specified rounding mode.
+
+Arguments:
+""""""""""
+
+The '``llvm.fptrunc.round``' intrinsic takes a :ref:`floating-point
+<t_floating>` value to cast and a :ref:`floating-point <t_floating>` type
+to cast it to. This argument must be larger in size than the result.
+
+The second argument specifies the rounding mode as described in the constrained
+intrinsics section.
+For this intrinsic, the "round.dynamic" mode is not supported.
+
+Semantics:
+""""""""""
+
+The '``llvm.fptrunc.round``' intrinsic casts a ``value`` from a larger
+:ref:`floating-point <t_floating>` type to a smaller :ref:`floating-point
+<t_floating>` type.
+This intrinsic is assumed to execute in the default :ref:`floating-point
+environment <floatenv>` *except* for the rounding mode.
+This intrinsic is not supported on all targets. Some targets may not support
+all rounding modes.
diff --git a/llvm/docs/SourceLevelDebugging.rst b/llvm/docs/SourceLevelDebugging.rst
index e4a529d0e242..ae26268cb6d7 100644
--- a/llvm/docs/SourceLevelDebugging.rst
+++ b/llvm/docs/SourceLevelDebugging.rst
@@ -1086,6 +1086,10 @@ a Fortran front-end would generate the following descriptors:
!DILocalVariable(name: "string", arg: 1, scope: !10, file: !3, line: 4, type: !15)
!DIStringType(name: "character(*)!2", stringLength: !16, stringLengthExpression: !DIExpression(), size: 32)
+
+A Fortran deferred-length character can also carry the location of the raw character storage in addition to the length of the string. This information is encoded in the stringLocationExpression field; based on it, a DW_AT_data_location attribute is emitted on the DW_TAG_string_type debug info entry.
+
+ !DIStringType(name: "character(*)!2", stringLengthExpression: !DIExpression(), stringLocationExpression: !DIExpression(DW_OP_push_object_address, DW_OP_deref), size: 32)
and this will materialize in DWARF tags as:
@@ -1097,6 +1101,7 @@ and this will materialize in DWARF tags as:
0x00000064: DW_TAG_variable
DW_AT_location (DW_OP_fbreg +16)
DW_AT_type (0x00000083 "integer*8")
+ DW_AT_data_location (DW_OP_push_object_address, DW_OP_deref)
...
DW_AT_artificial (true)
diff --git a/llvm/include/llvm/Analysis/ScalarEvolution.h b/llvm/include/llvm/Analysis/ScalarEvolution.h
index 4cafeff400a7..c224346442f1 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolution.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolution.h
@@ -246,10 +246,6 @@ public:
/// Prints a textual representation of this predicate with an indentation of
/// \p Depth.
virtual void print(raw_ostream &OS, unsigned Depth = 0) const = 0;
-
- /// Returns the SCEV to which this predicate applies, or nullptr if this is
- /// a SCEVUnionPredicate.
- virtual const SCEV *getExpr() const = 0;
};
inline raw_ostream &operator<<(raw_ostream &OS, const SCEVPredicate &P) {
@@ -293,7 +289,6 @@ public:
bool implies(const SCEVPredicate *N) const override;
void print(raw_ostream &OS, unsigned Depth = 0) const override;
bool isAlwaysTrue() const override;
- const SCEV *getExpr() const override;
ICmpInst::Predicate getPredicate() const { return Pred; }
@@ -397,7 +392,7 @@ public:
IncrementWrapFlags getFlags() const { return Flags; }
/// Implementation of the SCEVPredicate interface
- const SCEV *getExpr() const override;
+ const SCEVAddRecExpr *getExpr() const;
bool implies(const SCEVPredicate *N) const override;
void print(raw_ostream &OS, unsigned Depth = 0) const override;
bool isAlwaysTrue() const override;
@@ -422,9 +417,6 @@ private:
/// Vector with references to all predicates in this union.
SmallVector<const SCEVPredicate *, 16> Preds;
- /// Maps SCEVs to predicates for quick look-ups.
- PredicateMap SCEVToPreds;
-
/// Adds a predicate to this union.
void add(const SCEVPredicate *N);
@@ -435,15 +427,10 @@ public:
return Preds;
}
- /// Returns a reference to a vector containing all predicates which apply to
- /// \p Expr.
- ArrayRef<const SCEVPredicate *> getPredicatesForExpr(const SCEV *Expr) const;
-
/// Implementation of the SCEVPredicate interface
bool isAlwaysTrue() const override;
bool implies(const SCEVPredicate *N) const override;
void print(raw_ostream &OS, unsigned Depth) const override;
- const SCEV *getExpr() const override;
/// We estimate the complexity of a union predicate as the size number of
/// predicates in the union.
diff --git a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
index ee305c895e43..4d15ac573546 100644
--- a/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
+++ b/llvm/include/llvm/Analysis/ScalarEvolutionExpressions.h
@@ -698,8 +698,11 @@ public:
case scUMinExpr:
case scSequentialUMinExpr:
case scAddRecExpr:
- for (const auto *Op : cast<SCEVNAryExpr>(S)->operands())
+ for (const auto *Op : cast<SCEVNAryExpr>(S)->operands()) {
push(Op);
+ if (Visitor.isDone())
+ break;
+ }
continue;
case scUDivExpr: {
const SCEVUDivExpr *UDiv = cast<SCEVUDivExpr>(S);
diff --git a/llvm/include/llvm/BinaryFormat/Swift.def b/llvm/include/llvm/BinaryFormat/Swift.def
index 4ec432001283..aa3ee4f1dc3f 100644
--- a/llvm/include/llvm/BinaryFormat/Swift.def
+++ b/llvm/include/llvm/BinaryFormat/Swift.def
@@ -28,3 +28,5 @@ HANDLE_SWIFT_SECTION(conform, "__swift5_proto", "swift5_protocol_confromances",
".sw5prtc$B")
HANDLE_SWIFT_SECTION(protocs, "__swift5_protos", "swift5_protocols",
".sw5prt$B")
+HANDLE_SWIFT_SECTION(acfuncs, "__swift5_acfuncs", "swift5_accessible_functions",
+ ".sw5acfn$B")
diff --git a/llvm/include/llvm/CodeGen/ISDOpcodes.h b/llvm/include/llvm/CodeGen/ISDOpcodes.h
index 9fedb531db0c..beb99b2ff5cf 100644
--- a/llvm/include/llvm/CodeGen/ISDOpcodes.h
+++ b/llvm/include/llvm/CodeGen/ISDOpcodes.h
@@ -462,6 +462,9 @@ enum NodeType {
STRICT_FSETCC,
STRICT_FSETCCS,
+ // FPTRUNC_ROUND - This corresponds to the fptrunc_round intrinsic.
+ FPTRUNC_ROUND,
+
/// FMA - Perform a * b + c with no intermediate rounding step.
FMA,
@@ -614,6 +617,17 @@ enum NodeType {
MULHU,
MULHS,
+ /// AVGFLOORS/AVGFLOORU - Averaging add - Add two integers using an integer of
+ /// type i[N+1], halving the result by shifting it one bit right.
+ /// shr(add(ext(X), ext(Y)), 1)
+ AVGFLOORS,
+ AVGFLOORU,
+ /// AVGCEILS/AVGCEILU - Rounding averaging add - Add two integers using an
+ /// integer of type i[N+2], add 1 and halve the result by shifting it one bit
+ /// right. shr(add(ext(X), ext(Y), 1), 1)
+ AVGCEILS,
+ AVGCEILU,
+
// ABDS/ABDU - Absolute difference - Return the absolute difference between
// two numbers interpreted as signed/unsigned.
// i.e trunc(abs(sext(Op0) - sext(Op1))) becomes abds(Op0, Op1)
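The new averaging opcodes are defined as a widened addition followed by a one-bit right shift. A small scalar reference in C++, assuming 32-bit operands and using a 64-bit intermediate to stand in for the i[N+1]/i[N+2] widening described above (illustrative only, not part of the patch):

    #include <cstdint>

    // Floor average: shr(add(ext(X), ext(Y)), 1) on the widened intermediate.
    int32_t avgfloors(int32_t x, int32_t y) {
      return static_cast<int32_t>((static_cast<int64_t>(x) + y) >> 1);
    }
    uint32_t avgflooru(uint32_t x, uint32_t y) {
      return static_cast<uint32_t>((static_cast<uint64_t>(x) + y) >> 1);
    }

    // Ceiling (rounding) average: shr(add(ext(X), ext(Y), 1), 1).
    int32_t avgceils(int32_t x, int32_t y) {
      return static_cast<int32_t>((static_cast<int64_t>(x) + y + 1) >> 1);
    }
    uint32_t avgceilu(uint32_t x, uint32_t y) {
      return static_cast<uint32_t>((static_cast<uint64_t>(x) + y + 1) >> 1);
    }

For example, avgflooru(3, 4) yields 3 while avgceilu(3, 4) yields 4; the widened addition avoids the overflow that a plain (x + y) / 2 would risk.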
diff --git a/llvm/include/llvm/CodeGen/MachineRegisterInfo.h b/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
index 2d34cd6a1660..d3b29081da6f 100644
--- a/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
+++ b/llvm/include/llvm/CodeGen/MachineRegisterInfo.h
@@ -835,23 +835,12 @@ public:
/// to refer to the designated register.
void updateDbgUsersToReg(MCRegister OldReg, MCRegister NewReg,
ArrayRef<MachineInstr *> Users) const {
- SmallSet<MCRegister, 4> OldRegUnits;
- for (MCRegUnitIterator RUI(OldReg, getTargetRegisterInfo()); RUI.isValid();
- ++RUI)
- OldRegUnits.insert(*RUI);
-
// If this operand is a register, check whether it overlaps with OldReg.
// If it does, replace with NewReg.
- auto UpdateOp = [this, &NewReg, &OldReg, &OldRegUnits](MachineOperand &Op) {
- if (Op.isReg()) {
- for (MCRegUnitIterator RUI(OldReg, getTargetRegisterInfo());
- RUI.isValid(); ++RUI) {
- if (OldRegUnits.contains(*RUI)) {
- Op.setReg(NewReg);
- break;
- }
- }
- }
+ auto UpdateOp = [this, &NewReg, &OldReg](MachineOperand &Op) {
+ if (Op.isReg() &&
+ getTargetRegisterInfo()->regsOverlap(Op.getReg(), OldReg))
+ Op.setReg(NewReg);
};
// Iterate through (possibly several) operands to DBG_VALUEs and update
diff --git a/llvm/include/llvm/CodeGen/TargetLowering.h b/llvm/include/llvm/CodeGen/TargetLowering.h
index 44f1773bdab3..ec9f9b73b8f6 100644
--- a/llvm/include/llvm/CodeGen/TargetLowering.h
+++ b/llvm/include/llvm/CodeGen/TargetLowering.h
@@ -2515,6 +2515,10 @@ public:
case ISD::FMAXNUM_IEEE:
case ISD::FMINIMUM:
case ISD::FMAXIMUM:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
return true;
default: return false;
}
diff --git a/llvm/include/llvm/IR/DIBuilder.h b/llvm/include/llvm/IR/DIBuilder.h
index d3aad3719900..c635659fd625 100644
--- a/llvm/include/llvm/IR/DIBuilder.h
+++ b/llvm/include/llvm/IR/DIBuilder.h
@@ -221,6 +221,23 @@ namespace llvm {
/// \param SizeInBits Size of the type.
DIStringType *createStringType(StringRef Name, uint64_t SizeInBits);
+ /// Create debugging information entry for Fortran
+ /// assumed length string type.
+ /// \param Name Type name.
+ /// \param StringLength String length expressed as DIVariable *.
+ /// \param StrLocationExp Optional memory location of the string.
+ DIStringType *createStringType(StringRef Name, DIVariable *StringLength,
+ DIExpression *StrLocationExp = nullptr);
+
+ /// Create debugging information entry for Fortran
+ /// assumed length string type.
+ /// \param Name Type name.
+ /// \param StringLengthExp String length expressed in DIExpression form.
+ /// \param StrLocationExp Optional memory location of the string.
+ DIStringType *createStringType(StringRef Name,
+ DIExpression *StringLengthExp,
+ DIExpression *StrLocationExp = nullptr);
+
/// Create debugging information entry for a qualified
/// type, e.g. 'const int'.
/// \param Tag Tag identifing type, e.g. dwarf::TAG_volatile_type
diff --git a/llvm/include/llvm/IR/InstrTypes.h b/llvm/include/llvm/IR/InstrTypes.h
index 589926c0faf1..35c1ef5514b2 100644
--- a/llvm/include/llvm/IR/InstrTypes.h
+++ b/llvm/include/llvm/IR/InstrTypes.h
@@ -1761,7 +1761,7 @@ public:
return nullptr;
}
- /// Extract the preallocated type for a call or parameter.
+ /// Extract the inalloca type for a call or parameter.
Type *getParamInAllocaType(unsigned ArgNo) const {
if (auto *Ty = Attrs.getParamInAllocaType(ArgNo))
return Ty;
@@ -1770,6 +1770,22 @@ public:
return nullptr;
}
+ /// Extract the sret type for a call or parameter.
+ Type *getParamStructRetType(unsigned ArgNo) const {
+ if (auto *Ty = Attrs.getParamStructRetType(ArgNo))
+ return Ty;
+ if (const Function *F = getCalledFunction())
+ return F->getAttributes().getParamStructRetType(ArgNo);
+ return nullptr;
+ }
+
+ /// Extract the elementtype type for a parameter.
+ /// Note that elementtype() can only be applied to call arguments, not
+ /// function declaration parameters.
+ Type *getParamElementType(unsigned ArgNo) const {
+ return Attrs.getParamElementType(ArgNo);
+ }
+
/// Extract the number of dereferenceable bytes for a call or
/// parameter (0=unknown).
uint64_t getRetDereferenceableBytes() const {
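A brief usage sketch for the two CallBase accessors added above; the surrounding helper is hypothetical and only exercises the new getters:

    #include "llvm/IR/InstrTypes.h"

    using namespace llvm;

    // Hypothetical helper: inspect attribute-carried types on a call site.
    void inspectCallTypes(const CallBase &CB) {
      for (unsigned I = 0, E = CB.arg_size(); I != E; ++I) {
        // sret type, taken from the call site or the callee declaration.
        if (Type *SRetTy = CB.getParamStructRetType(I))
          (void)SRetTy;
        // elementtype(); only attached to call-site arguments, never declarations.
        if (Type *ElemTy = CB.getParamElementType(I))
          (void)ElemTy;
      }
    }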
diff --git a/llvm/include/llvm/IR/Intrinsics.td b/llvm/include/llvm/IR/Intrinsics.td
index e7d38276f35e..7991bc0d039c 100644
--- a/llvm/include/llvm/IR/Intrinsics.td
+++ b/llvm/include/llvm/IR/Intrinsics.td
@@ -909,6 +909,12 @@ let IntrProperties = [IntrInaccessibleMemOnly, IntrWillReturn] in {
}
// FIXME: Consider maybe adding intrinsics for sitofp, uitofp.
+
+// Truncate a floating point number with a specific rounding mode
+def int_fptrunc_round : DefaultAttrsIntrinsic<[ llvm_anyfloat_ty ],
+ [ llvm_anyfloat_ty, llvm_metadata_ty ],
+ [ IntrNoMem, IntrWillReturn ]>;
+
//===------------------------- Expect Intrinsics --------------------------===//
//
def int_expect : DefaultAttrsIntrinsic<[llvm_anyint_ty],
diff --git a/llvm/include/llvm/ProfileData/SampleProf.h b/llvm/include/llvm/ProfileData/SampleProf.h
index bad2139fe8f0..2e255b90d1b5 100644
--- a/llvm/include/llvm/ProfileData/SampleProf.h
+++ b/llvm/include/llvm/ProfileData/SampleProf.h
@@ -413,6 +413,8 @@ enum ContextAttributeMask {
ContextNone = 0x0,
ContextWasInlined = 0x1, // Leaf of context was inlined in previous build
ContextShouldBeInlined = 0x2, // Leaf of context should be inlined
+ ContextDuplicatedIntoBase =
+ 0x4, // Leaf of context is duplicated into the base profile
};
// Represents a context frame with function name and line location
diff --git a/llvm/include/llvm/Support/TargetOpcodes.def b/llvm/include/llvm/Support/TargetOpcodes.def
index 428cbb44705d..ca8876f51fae 100644
--- a/llvm/include/llvm/Support/TargetOpcodes.def
+++ b/llvm/include/llvm/Support/TargetOpcodes.def
@@ -322,6 +322,9 @@ HANDLE_TARGET_OPCODE(G_BITCAST)
/// Generic freeze.
HANDLE_TARGET_OPCODE(G_FREEZE)
+// INTRINSIC fptrunc_round intrinsic.
+HANDLE_TARGET_OPCODE(G_INTRINSIC_FPTRUNC_ROUND)
+
/// INTRINSIC trunc intrinsic.
HANDLE_TARGET_OPCODE(G_INTRINSIC_TRUNC)
diff --git a/llvm/include/llvm/Target/GenericOpcodes.td b/llvm/include/llvm/Target/GenericOpcodes.td
index 2af20ab6a53f..8ec12a9bc5d0 100644
--- a/llvm/include/llvm/Target/GenericOpcodes.td
+++ b/llvm/include/llvm/Target/GenericOpcodes.td
@@ -965,6 +965,12 @@ def G_FNEARBYINT : GenericInstruction {
//------------------------------------------------------------------------------
// Opcodes for LLVM Intrinsics
//------------------------------------------------------------------------------
+def G_INTRINSIC_FPTRUNC_ROUND : GenericInstruction {
+ let OutOperandList = (outs type0:$dst);
+ let InOperandList = (ins type1:$src1, i32imm:$round_mode);
+ let hasSideEffects = false;
+}
+
def G_INTRINSIC_TRUNC : GenericInstruction {
let OutOperandList = (outs type0:$dst);
let InOperandList = (ins type0:$src1);
diff --git a/llvm/include/llvm/Target/Target.td b/llvm/include/llvm/Target/Target.td
index 44ae273f3910..8e93a6e6fd6a 100644
--- a/llvm/include/llvm/Target/Target.td
+++ b/llvm/include/llvm/Target/Target.td
@@ -756,6 +756,28 @@ def ins;
/// of operands.
def variable_ops;
+/// Variable-length instruction encoding utilities.
+/// The `ascend` operator should be used like this:
+/// (ascend 0b0000, 0b1111)
+/// Which represents a sequence of encoding fragments placed from LSB to MSB.
+/// Thus, in this case the final encoding will be 0b11110000.
+/// The arguments for `ascend` can either be `bits` or another DAG.
+def ascend;
+/// In addition, we can use `descend` to describe an encoding that places
+/// its arguments (i.e. encoding fragments) from MSB to LSB. For instance:
+/// (descend 0b0000, 0b1111)
+/// This results in an encoding of 0b00001111.
+def descend;
+/// The `operand` operator should be used like this:
+/// (operand "$src", 4)
+/// Which represents a 4-bit encoding for an instruction operand named `$src`.
+def operand;
+/// Similar to `operand`, we can reference only part of the operand's encoding:
+/// (slice "$src", 6, 8)
+/// (slice "$src", 8, 6)
+/// Both DAGs represent bits 6 to 8 (3 bits in total) in the encoding of
+/// operand `$src`.
+def slice;
/// PointerLikeRegClass - Values that are designed to have pointer width are
/// derived from this. TableGen treats the register class as having a symbolic
diff --git a/llvm/include/llvm/Target/TargetSelectionDAG.td b/llvm/include/llvm/Target/TargetSelectionDAG.td
index d8ef7c49a5f9..7b2a25605acb 100644
--- a/llvm/include/llvm/Target/TargetSelectionDAG.td
+++ b/llvm/include/llvm/Target/TargetSelectionDAG.td
@@ -365,6 +365,10 @@ def mul : SDNode<"ISD::MUL" , SDTIntBinOp,
[SDNPCommutative, SDNPAssociative]>;
def mulhs : SDNode<"ISD::MULHS" , SDTIntBinOp, [SDNPCommutative]>;
def mulhu : SDNode<"ISD::MULHU" , SDTIntBinOp, [SDNPCommutative]>;
+def avgfloors : SDNode<"ISD::AVGFLOORS" , SDTIntBinOp, [SDNPCommutative]>;
+def avgflooru : SDNode<"ISD::AVGFLOORU" , SDTIntBinOp, [SDNPCommutative]>;
+def avgceils : SDNode<"ISD::AVGCEILS" , SDTIntBinOp, [SDNPCommutative]>;
+def avgceilu : SDNode<"ISD::AVGCEILU" , SDTIntBinOp, [SDNPCommutative]>;
def abds : SDNode<"ISD::ABDS" , SDTIntBinOp, [SDNPCommutative]>;
def abdu : SDNode<"ISD::ABDU" , SDTIntBinOp, [SDNPCommutative]>;
def smullohi : SDNode<"ISD::SMUL_LOHI" , SDTIntBinHiLoOp, [SDNPCommutative]>;
diff --git a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
index 3bb4a8297be6..b46639301277 100644
--- a/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
+++ b/llvm/include/llvm/Transforms/Utils/MemoryTaggingSupport.h
@@ -19,8 +19,10 @@
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Module.h"
+#include "llvm/IR/ValueHandle.h"
namespace llvm {
+namespace memtag {
// For an alloca valid between lifetime markers Start and Ends, call the
// Callback for all possible exits out of the lifetime in the containing
// function, which can return from the instructions in RetVec.
@@ -71,6 +73,35 @@ bool isStandardLifetime(const SmallVectorImpl<IntrinsicInst *> &LifetimeStart,
Instruction *getUntagLocationIfFunctionExit(Instruction &Inst);
+struct AllocaInfo {
+ AllocaInst *AI;
+ TrackingVH<Instruction> OldAI; // Track through RAUW to replace debug uses.
+ SmallVector<IntrinsicInst *, 2> LifetimeStart;
+ SmallVector<IntrinsicInst *, 2> LifetimeEnd;
+ SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
+};
+
+struct StackInfo {
+ MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
+ SmallVector<Instruction *, 4> UnrecognizedLifetimes;
+ SmallVector<Instruction *, 8> RetVec;
+ bool CallsReturnTwice = false;
+};
+
+class StackInfoBuilder {
+public:
+ StackInfoBuilder(std::function<bool(const AllocaInst &)> IsInterestingAlloca)
+ : IsInterestingAlloca(IsInterestingAlloca) {}
+
+ void visit(Instruction &Inst);
+ StackInfo &get() { return Info; };
+
+private:
+ StackInfo Info;
+ std::function<bool(const AllocaInst &)> IsInterestingAlloca;
+};
+
+} // namespace memtag
} // namespace llvm
#endif
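A minimal usage sketch for the new memtag helpers follows; the trivial "interesting alloca" predicate is an assumption for illustration, whereas the real pass plugs in its stack-safety-aware check, as shown further down in this patch.

#include "llvm/IR/InstIterator.h"
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
using namespace llvm;

// Sketch: walk a function once and collect the allocas to instrument.
static size_t countAllocasToInstrument(Function &F) {
  memtag::StackInfoBuilder SIB(
      [](const AllocaInst &AI) { return AI.isStaticAlloca(); });
  for (Instruction &I : instructions(F))
    SIB.visit(I);
  return SIB.get().AllocasToInstrument.size();
}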
diff --git a/llvm/lib/Analysis/ScalarEvolution.cpp b/llvm/lib/Analysis/ScalarEvolution.cpp
index 465e52bf0375..6170bb8a5db3 100644
--- a/llvm/lib/Analysis/ScalarEvolution.cpp
+++ b/llvm/lib/Analysis/ScalarEvolution.cpp
@@ -5887,6 +5887,41 @@ const SCEV *ScalarEvolution::createNodeForPHI(PHINode *PN) {
return getUnknown(PN);
}
+bool SCEVMinMaxExprContains(const SCEV *Root, const SCEV *OperandToFind,
+ SCEVTypes RootKind) {
+ struct FindClosure {
+ const SCEV *OperandToFind;
+ const SCEVTypes RootKind; // Must be a sequential min/max expression.
+ const SCEVTypes NonSequentialRootKind; // Non-seq variant of RootKind.
+
+ bool Found = false;
+
+ bool canRecurseInto(SCEVTypes Kind) const {
+ // We can only recurse into SCEV expressions whose effective type matches
+ // that of our root SCEV expression.
+ return RootKind == Kind || NonSequentialRootKind == Kind;
+ };
+
+ FindClosure(const SCEV *OperandToFind, SCEVTypes RootKind)
+ : OperandToFind(OperandToFind), RootKind(RootKind),
+ NonSequentialRootKind(
+ SCEVSequentialMinMaxExpr::getEquivalentNonSequentialSCEVType(
+ RootKind)) {}
+
+ bool follow(const SCEV *S) {
+ Found = S == OperandToFind;
+
+ return !isDone() && canRecurseInto(S->getSCEVType());
+ }
+
+ bool isDone() const { return Found; }
+ };
+
+ FindClosure FC(OperandToFind, RootKind);
+ visitAll(Root, FC);
+ return FC.Found;
+}
+
const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
Instruction *I, ICmpInst *Cond, Value *TrueVal, Value *FalseVal) {
// Try to match some simple smax or umax patterns.
@@ -5952,31 +5987,32 @@ const SCEV *ScalarEvolution::createNodeForSelectOrPHIInstWithICmpInstCond(
}
break;
case ICmpInst::ICMP_NE:
- // n != 0 ? n+x : 1+x -> umax(n, 1)+x
- if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
- isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
- const SCEV *One = getOne(I->getType());
- const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
- const SCEV *LA = getSCEV(TrueVal);
- const SCEV *RA = getSCEV(FalseVal);
- const SCEV *LDiff = getMinusSCEV(LA, LS);
- const SCEV *RDiff = getMinusSCEV(RA, One);
- if (LDiff == RDiff)
- return getAddExpr(getUMaxExpr(One, LS), LDiff);
- }
- break;
+ // x != 0 ? x+y : C+y -> x == 0 ? C+y : x+y
+ std::swap(TrueVal, FalseVal);
+ LLVM_FALLTHROUGH;
case ICmpInst::ICMP_EQ:
- // n == 0 ? 1+x : n+x -> umax(n, 1)+x
+ // x == 0 ? C+y : x+y -> umax(x, C)+y iff C u<= 1
if (getTypeSizeInBits(LHS->getType()) <= getTypeSizeInBits(I->getType()) &&
isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero()) {
- const SCEV *One = getOne(I->getType());
- const SCEV *LS = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
- const SCEV *LA = getSCEV(TrueVal);
- const SCEV *RA = getSCEV(FalseVal);
- const SCEV *LDiff = getMinusSCEV(LA, One);
- const SCEV *RDiff = getMinusSCEV(RA, LS);
- if (LDiff == RDiff)
- return getAddExpr(getUMaxExpr(One, LS), LDiff);
+ const SCEV *X = getNoopOrZeroExtend(getSCEV(LHS), I->getType());
+ const SCEV *TrueValExpr = getSCEV(TrueVal); // C+y
+ const SCEV *FalseValExpr = getSCEV(FalseVal); // x+y
+ const SCEV *Y = getMinusSCEV(FalseValExpr, X); // y = (x+y)-x
+ const SCEV *C = getMinusSCEV(TrueValExpr, Y); // C = (C+y)-y
+ if (isa<SCEVConstant>(C) && cast<SCEVConstant>(C)->getAPInt().ule(1))
+ return getAddExpr(getUMaxExpr(X, C), Y);
+ }
+ // x == 0 ? 0 : umin (..., x, ...) -> umin_seq(x, umin (...))
+ // x == 0 ? 0 : umin_seq(..., x, ...) -> umin_seq(x, umin_seq(...))
+ // x == 0 ? 0 : umin (..., umin_seq(..., x, ...), ...)
+ // -> umin_seq(x, umin (..., umin_seq(...), ...))
+ if (getTypeSizeInBits(LHS->getType()) == getTypeSizeInBits(I->getType()) &&
+ isa<ConstantInt>(RHS) && cast<ConstantInt>(RHS)->isZero() &&
+ isa<ConstantInt>(TrueVal) && cast<ConstantInt>(TrueVal)->isZero()) {
+ const SCEV *X = getSCEV(LHS);
+ const SCEV *FalseValExpr = getSCEV(FalseVal);
+ if (SCEVMinMaxExprContains(FalseValExpr, X, scSequentialUMinExpr))
+ return getUMinExpr(X, FalseValExpr, /*Sequential=*/true);
}
break;
default:
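For the C u<= 1 case, the select-to-umax rewrite is easy to sanity-check with plain unsigned arithmetic. The small self-contained program below (an illustration only, using C == 1) checks the identity on a few small values.

#include <algorithm>
#include <cassert>
#include <cstdint>

// n != 0 ? n + x : 1 + x equals umax(n, 1) + x for unsigned n, which is the
// select pattern the ICMP_NE/ICMP_EQ cases above fold.
int main() {
  for (uint32_t n = 0; n < 8; ++n)
    for (uint32_t x = 0; x < 8; ++x)
      assert((n != 0 ? n + x : 1 + x) == std::max<uint32_t>(n, 1) + x);
  return 0;
}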
@@ -12866,8 +12902,8 @@ static void PrintLoopInfo(raw_ostream &OS, ScalarEvolution *SE,
if (!isa<SCEVCouldNotCompute>(PBT)) {
OS << "Predicated backedge-taken count is " << *PBT << "\n";
OS << " Predicates:\n";
- SCEVUnionPredicate Dedup(Preds);
- Dedup.print(OS, 4);
+ for (auto *P : Preds)
+ P->print(OS, 4);
} else {
OS << "Unpredictable predicated backedge-taken count. ";
}
@@ -13642,8 +13678,7 @@ public:
const SCEV *visitUnknown(const SCEVUnknown *Expr) {
if (Pred) {
if (auto *U = dyn_cast<SCEVUnionPredicate>(Pred)) {
- auto ExprPreds = U->getPredicatesForExpr(Expr);
- for (auto *Pred : ExprPreds)
+ for (auto *Pred : U->getPredicates())
if (const auto *IPred = dyn_cast<SCEVComparePredicate>(Pred))
if (IPred->getLHS() == Expr &&
IPred->getPredicate() == ICmpInst::ICMP_EQ)
@@ -13726,8 +13761,7 @@ private:
for (auto *P : PredicatedRewrite->second){
// Wrap predicates from outer loops are not supported.
if (auto *WP = dyn_cast<const SCEVWrapPredicate>(P)) {
- auto *AR = cast<const SCEVAddRecExpr>(WP->getExpr());
- if (L != AR->getLoop())
+ if (L != WP->getExpr()->getLoop())
return Expr;
}
if (!addOverflowAssumption(P))
@@ -13794,8 +13828,6 @@ bool SCEVComparePredicate::implies(const SCEVPredicate *N) const {
bool SCEVComparePredicate::isAlwaysTrue() const { return false; }
-const SCEV *SCEVComparePredicate::getExpr() const { return LHS; }
-
void SCEVComparePredicate::print(raw_ostream &OS, unsigned Depth) const {
if (Pred == ICmpInst::ICMP_EQ)
OS.indent(Depth) << "Equal predicate: " << *LHS << " == " << *RHS << "\n";
@@ -13811,7 +13843,7 @@ SCEVWrapPredicate::SCEVWrapPredicate(const FoldingSetNodeIDRef ID,
IncrementWrapFlags Flags)
: SCEVPredicate(ID, P_Wrap), AR(AR), Flags(Flags) {}
-const SCEV *SCEVWrapPredicate::getExpr() const { return AR; }
+const SCEVAddRecExpr *SCEVWrapPredicate::getExpr() const { return AR; }
bool SCEVWrapPredicate::implies(const SCEVPredicate *N) const {
const auto *Op = dyn_cast<SCEVWrapPredicate>(N);
@@ -13871,30 +13903,15 @@ bool SCEVUnionPredicate::isAlwaysTrue() const {
[](const SCEVPredicate *I) { return I->isAlwaysTrue(); });
}
-ArrayRef<const SCEVPredicate *>
-SCEVUnionPredicate::getPredicatesForExpr(const SCEV *Expr) const {
- auto I = SCEVToPreds.find(Expr);
- if (I == SCEVToPreds.end())
- return ArrayRef<const SCEVPredicate *>();
- return I->second;
-}
-
bool SCEVUnionPredicate::implies(const SCEVPredicate *N) const {
if (const auto *Set = dyn_cast<SCEVUnionPredicate>(N))
return all_of(Set->Preds,
[this](const SCEVPredicate *I) { return this->implies(I); });
- auto ScevPredsIt = SCEVToPreds.find(N->getExpr());
- if (ScevPredsIt == SCEVToPreds.end())
- return false;
- auto &SCEVPreds = ScevPredsIt->second;
-
- return any_of(SCEVPreds,
+ return any_of(Preds,
[N](const SCEVPredicate *I) { return I->implies(N); });
}
-const SCEV *SCEVUnionPredicate::getExpr() const { return nullptr; }
-
void SCEVUnionPredicate::print(raw_ostream &OS, unsigned Depth) const {
for (auto Pred : Preds)
Pred->print(OS, Depth);
@@ -13907,14 +13924,6 @@ void SCEVUnionPredicate::add(const SCEVPredicate *N) {
return;
}
- if (implies(N))
- return;
-
- const SCEV *Key = N->getExpr();
- assert(Key && "Only SCEVUnionPredicate doesn't have an "
- " associated expression!");
-
- SCEVToPreds[Key].push_back(N);
Preds.push_back(N);
}
diff --git a/llvm/lib/CodeGen/CodeGenCommonISel.cpp b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
index f809120f02fa..877aa69c3e58 100644
--- a/llvm/lib/CodeGen/CodeGenCommonISel.cpp
+++ b/llvm/lib/CodeGen/CodeGenCommonISel.cpp
@@ -129,9 +129,7 @@ llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
MachineBasicBlock::iterator Start = BB->begin();
MachineBasicBlock::iterator Previous = SplitPoint;
- do {
- --Previous;
- } while (Previous->isDebugInstr());
+ --Previous;
if (TII.isTailCall(*SplitPoint) &&
Previous->getOpcode() == TII.getCallFrameDestroyOpcode()) {
@@ -144,7 +142,7 @@ llvm::findSplitPointForStackProtector(MachineBasicBlock *BB,
// ADJCALLSTACKUP ...
// TAILJMP somewhere
// On the other hand, it could be an unrelated call in which case this tail
- // call has no register moves of its own and should be the split point. For
+ // call has no register moves of its own and should be the split point. For
// example:
// ADJCALLSTACKDOWN
// CALL something_else
diff --git a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
index 6d415c9c7f90..5ac0803bc61f 100644
--- a/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
+++ b/llvm/lib/CodeGen/GlobalISel/IRTranslator.cpp
@@ -2252,6 +2252,23 @@ bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
return CLI->lowerCall(MIRBuilder, Info);
}
+ case Intrinsic::fptrunc_round: {
+ unsigned Flags = MachineInstr::copyFlagsFromInstruction(CI);
+
+ // Convert the metadata argument to a constant integer
+ Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
+ Optional<RoundingMode> RoundMode =
+ convertStrToRoundingMode(cast<MDString>(MD)->getString());
+
+ // Add the rounding mode as an immediate integer operand
+ MIRBuilder
+ .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
+ {getOrCreateVReg(CI)},
+ {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
+ .addImm((int)RoundMode.getValue());
+
+ return true;
+ }
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index 0dbbc218e946..e584ebe88538 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -1081,8 +1081,7 @@ using MIRegs = std::pair<MachineInstr *, SmallVector<unsigned, 2>>;
/// Sink an instruction and its associated debug instructions.
static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
MachineBasicBlock::iterator InsertPos,
- SmallVectorImpl<MIRegs> &DbgValuesToSink) {
-
+ ArrayRef<MIRegs> DbgValuesToSink) {
// If we cannot find a location to use (merge with), then we erase the debug
// location to prevent debug-info driven tools from potentially reporting
// wrong location information.
@@ -1101,7 +1100,7 @@ static void performSink(MachineInstr &MI, MachineBasicBlock &SuccToSinkTo,
// DBG_VALUE location as 'undef', indicating that any earlier variable
// location should be terminated as we've optimised away the value at this
// point.
- for (auto DbgValueToSink : DbgValuesToSink) {
+ for (const auto &DbgValueToSink : DbgValuesToSink) {
MachineInstr *DbgMI = DbgValueToSink.first;
MachineInstr *NewDbgMI = DbgMI->getMF()->CloneMachineInstr(DbgMI);
SuccToSinkTo.insert(InsertPos, NewDbgMI);
@@ -1684,14 +1683,6 @@ static bool hasRegisterDependency(MachineInstr *MI,
return HasRegDependency;
}
-static SmallSet<MCRegister, 4> getRegUnits(MCRegister Reg,
- const TargetRegisterInfo *TRI) {
- SmallSet<MCRegister, 4> RegUnits;
- for (auto RI = MCRegUnitIterator(Reg, TRI); RI.isValid(); ++RI)
- RegUnits.insert(*RI);
- return RegUnits;
-}
-
bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
MachineFunction &MF,
const TargetRegisterInfo *TRI,
@@ -1737,14 +1728,15 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
}
// Record debug use of each reg unit.
- SmallSet<MCRegister, 4> RegUnits = getRegUnits(MO.getReg(), TRI);
- for (MCRegister Reg : RegUnits)
- MIUnits[Reg].push_back(MO.getReg());
+ for (auto RI = MCRegUnitIterator(MO.getReg(), TRI); RI.isValid();
+ ++RI)
+ MIUnits[*RI].push_back(MO.getReg());
}
}
if (IsValid) {
- for (auto RegOps : MIUnits)
- SeenDbgInstrs[RegOps.first].push_back({&MI, RegOps.second});
+ for (auto &RegOps : MIUnits)
+ SeenDbgInstrs[RegOps.first].emplace_back(&MI,
+ std::move(RegOps.second));
}
continue;
}
@@ -1791,17 +1783,15 @@ bool PostRAMachineSinking::tryToSinkCopy(MachineBasicBlock &CurBB,
if (!MO.isReg() || !MO.isDef())
continue;
- SmallSet<MCRegister, 4> Units = getRegUnits(MO.getReg(), TRI);
- for (MCRegister Reg : Units) {
- for (auto MIRegs : SeenDbgInstrs.lookup(Reg)) {
+ for (auto RI = MCRegUnitIterator(MO.getReg(), TRI); RI.isValid(); ++RI) {
+ for (const auto &MIRegs : SeenDbgInstrs.lookup(*RI)) {
auto &Regs = DbgValsToSinkMap[MIRegs.first];
for (unsigned Reg : MIRegs.second)
Regs.push_back(Reg);
}
}
}
- SmallVector<MIRegs, 4> DbgValsToSink(DbgValsToSinkMap.begin(),
- DbgValsToSinkMap.end());
+ auto DbgValsToSink = DbgValsToSinkMap.takeVector();
// Clear the kill flag if SrcReg is killed between MI and the end of the
// block.
diff --git a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
index b316967660bb..dbf2f4e459aa 100644
--- a/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/DAGCombiner.cpp
@@ -12688,6 +12688,87 @@ SDValue DAGCombiner::visitEXTEND_VECTOR_INREG(SDNode *N) {
return SDValue();
}
+// Attempt to form one of the avg patterns from:
+// truncate(shr(add(zext(OpB), zext(OpA)), 1))
+// Creating avgflooru/avgfloors/avgceilu/avgceils, with the ceiling having an
+// extra rounding add:
+// truncate(shr(add(zext(OpB), zext(OpA), 1), 1))
+// This starts at a truncate, meaning the shift will always be srl, as the top
+// bits are known not to be demanded.
+static SDValue performAvgCombine(SDNode *N, SelectionDAG &DAG) {
+ assert(N->getOpcode() == ISD::TRUNCATE && "TRUNCATE node expected");
+ EVT VT = N->getValueType(0);
+
+ SDValue Shift = N->getOperand(0);
+ if (Shift.getOpcode() != ISD::SRL)
+ return SDValue();
+
+ // Is the right shift using an immediate value of 1?
+ ConstantSDNode *N1C = isConstOrConstSplat(Shift.getOperand(1));
+ if (!N1C || !N1C->isOne())
+ return SDValue();
+
+ // We are looking for an avgfloor
+ // add(ext, ext)
+// or one of these as an avgceil
+ // add(add(ext, ext), 1)
+ // add(add(ext, 1), ext)
+ // add(ext, add(ext, 1))
+ SDValue Add = Shift.getOperand(0);
+ if (Add.getOpcode() != ISD::ADD)
+ return SDValue();
+
+ SDValue ExtendOpA = Add.getOperand(0);
+ SDValue ExtendOpB = Add.getOperand(1);
+ auto MatchOperands = [&](SDValue Op1, SDValue Op2, SDValue Op3) {
+ ConstantSDNode *ConstOp;
+ if ((ConstOp = isConstOrConstSplat(Op1)) && ConstOp->isOne()) {
+ ExtendOpA = Op2;
+ ExtendOpB = Op3;
+ return true;
+ }
+ if ((ConstOp = isConstOrConstSplat(Op2)) && ConstOp->isOne()) {
+ ExtendOpA = Op1;
+ ExtendOpB = Op3;
+ return true;
+ }
+ if ((ConstOp = isConstOrConstSplat(Op3)) && ConstOp->isOne()) {
+ ExtendOpA = Op1;
+ ExtendOpB = Op2;
+ return true;
+ }
+ return false;
+ };
+ bool IsCeil = (ExtendOpA.getOpcode() == ISD::ADD &&
+ MatchOperands(ExtendOpA.getOperand(0), ExtendOpA.getOperand(1),
+ ExtendOpB)) ||
+ (ExtendOpB.getOpcode() == ISD::ADD &&
+ MatchOperands(ExtendOpB.getOperand(0), ExtendOpB.getOperand(1),
+ ExtendOpA));
+
+ unsigned ExtendOpAOpc = ExtendOpA.getOpcode();
+ unsigned ExtendOpBOpc = ExtendOpB.getOpcode();
+ if (!(ExtendOpAOpc == ExtendOpBOpc &&
+ (ExtendOpAOpc == ISD::ZERO_EXTEND || ExtendOpAOpc == ISD::SIGN_EXTEND)))
+ return SDValue();
+
+ // Is the result of the right shift being truncated to the same value type as
+ // the original operands, OpA and OpB?
+ SDValue OpA = ExtendOpA.getOperand(0);
+ SDValue OpB = ExtendOpB.getOperand(0);
+ EVT OpAVT = OpA.getValueType();
+ if (VT != OpAVT || OpAVT != OpB.getValueType())
+ return SDValue();
+
+ bool IsSignExtend = ExtendOpAOpc == ISD::SIGN_EXTEND;
+ unsigned AVGOpc = IsSignExtend ? (IsCeil ? ISD::AVGCEILS : ISD::AVGFLOORS)
+ : (IsCeil ? ISD::AVGCEILU : ISD::AVGFLOORU);
+ if (!DAG.getTargetLoweringInfo().isOperationLegalOrCustom(AVGOpc, VT))
+ return SDValue();
+
+ return DAG.getNode(AVGOpc, SDLoc(N), VT, OpA, OpB);
+}
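For reference, the scalar semantics that the new ISD::AVG* nodes encode, and which the pattern above recognizes, can be sketched as below; the helper names are illustrative only, and the signed variants are the same with sign extension instead.

#include <cstdint>

// avgflooru: truncate(srl(add(zext(a), zext(b)), 1))
static uint8_t avgflooru(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((uint16_t(a) + uint16_t(b)) >> 1);
}

// avgceilu: the same, with the extra rounding add of 1 before the shift.
static uint8_t avgceilu(uint8_t a, uint8_t b) {
  return static_cast<uint8_t>((uint16_t(a) + uint16_t(b) + 1) >> 1);
}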
+
SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
SDValue N0 = N->getOperand(0);
EVT VT = N->getValueType(0);
@@ -12974,6 +13055,8 @@ SDValue DAGCombiner::visitTRUNCATE(SDNode *N) {
if (SDValue NewVSel = matchVSelectOpSizesWithSetCC(N))
return NewVSel;
+ if (SDValue M = performAvgCombine(N, DAG))
+ return M;
// Narrow a suitable binary operation with a non-opaque constant operand by
// moving it ahead of the truncate. This is limited to pre-legalization
@@ -19112,47 +19195,33 @@ SDValue DAGCombiner::scalarizeExtractedVectorLoad(SDNode *EVE, EVT InVecVT,
SDValue NewPtr = TLI.getVectorElementPointer(DAG, OriginalLoad->getBasePtr(),
InVecVT, EltNo);
- // The replacement we need to do here is a little tricky: we need to
- // replace an extractelement of a load with a load.
- // Use ReplaceAllUsesOfValuesWith to do the replacement.
- // Note that this replacement assumes that the extractvalue is the only
- // use of the load; that's okay because we don't want to perform this
- // transformation in other cases anyway.
+ // We are replacing a vector load with a scalar load. The new load must have
+ // identical memory op ordering to the original.
SDValue Load;
- SDValue Chain;
if (ResultVT.bitsGT(VecEltVT)) {
// If the result type of vextract is wider than the load, then issue an
// extending load instead.
- ISD::LoadExtType ExtType = TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT,
- VecEltVT)
- ? ISD::ZEXTLOAD
- : ISD::EXTLOAD;
- Load = DAG.getExtLoad(ExtType, SDLoc(EVE), ResultVT,
- OriginalLoad->getChain(), NewPtr, MPI, VecEltVT,
- Alignment, OriginalLoad->getMemOperand()->getFlags(),
+ ISD::LoadExtType ExtType =
+ TLI.isLoadExtLegal(ISD::ZEXTLOAD, ResultVT, VecEltVT) ? ISD::ZEXTLOAD
+ : ISD::EXTLOAD;
+ Load = DAG.getExtLoad(ExtType, DL, ResultVT, OriginalLoad->getChain(),
+ NewPtr, MPI, VecEltVT, Alignment,
+ OriginalLoad->getMemOperand()->getFlags(),
OriginalLoad->getAAInfo());
- Chain = Load.getValue(1);
+ DAG.makeEquivalentMemoryOrdering(OriginalLoad, Load);
} else {
- Load = DAG.getLoad(
- VecEltVT, SDLoc(EVE), OriginalLoad->getChain(), NewPtr, MPI, Alignment,
- OriginalLoad->getMemOperand()->getFlags(), OriginalLoad->getAAInfo());
- Chain = Load.getValue(1);
+ // The result type is narrower than or the same width as the vector element
+ Load = DAG.getLoad(VecEltVT, DL, OriginalLoad->getChain(), NewPtr, MPI,
+ Alignment, OriginalLoad->getMemOperand()->getFlags(),
+ OriginalLoad->getAAInfo());
+ DAG.makeEquivalentMemoryOrdering(OriginalLoad, Load);
if (ResultVT.bitsLT(VecEltVT))
- Load = DAG.getNode(ISD::TRUNCATE, SDLoc(EVE), ResultVT, Load);
+ Load = DAG.getNode(ISD::TRUNCATE, DL, ResultVT, Load);
else
Load = DAG.getBitcast(ResultVT, Load);
}
- WorklistRemover DeadNodes(*this);
- SDValue From[] = { SDValue(EVE, 0), SDValue(OriginalLoad, 1) };
- SDValue To[] = { Load, Chain };
- DAG.ReplaceAllUsesOfValuesWith(From, To, 2);
- // Make sure to revisit this node to clean it up; it will usually be dead.
- AddToWorklist(EVE);
- // Since we're explicitly calling ReplaceAllUses, add the new node to the
- // worklist explicitly as well.
- AddToWorklistWithUsers(Load.getNode());
++OpsNarrowed;
- return SDValue(EVE, 0);
+ return Load;
}
/// Transform a vector binary operation into a scalar binary operation by moving
diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
index bc5d68a6ee8a..97acead64c40 100644
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeVectorTypes.cpp
@@ -3287,6 +3287,10 @@ void DAGTypeLegalizer::WidenVectorResult(SDNode *N, unsigned ResNo) {
case ISD::USHLSAT:
case ISD::ROTL:
case ISD::ROTR:
+ case ISD::AVGFLOORS:
+ case ISD::AVGFLOORU:
+ case ISD::AVGCEILS:
+ case ISD::AVGCEILU:
// Vector-predicated binary op widening. Note that -- unlike the
// unpredicated versions -- we don't have to worry about trapping on
// operations like UDIV, FADD, etc., as we pass on the original vector
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
index 01230a36e744..78da827c96f7 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp
@@ -6343,6 +6343,29 @@ void SelectionDAGBuilder::visitIntrinsicCall(const CallInst &I,
#include "llvm/IR/VPIntrinsics.def"
visitVectorPredicationIntrinsic(cast<VPIntrinsic>(I));
return;
+ case Intrinsic::fptrunc_round: {
+ // Get the last argument, the rounding-mode metadata, and convert it to an
+ // integer constant
+ Metadata *MD = cast<MetadataAsValue>(I.getArgOperand(1))->getMetadata();
+ Optional<RoundingMode> RoundMode =
+ convertStrToRoundingMode(cast<MDString>(MD)->getString());
+
+ EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
+
+ // Propagate fast-math-flags from IR to node(s).
+ SDNodeFlags Flags;
+ Flags.copyFMF(*cast<FPMathOperator>(&I));
+ SelectionDAG::FlagInserter FlagsInserter(DAG, Flags);
+
+ SDValue Result;
+ Result = DAG.getNode(
+ ISD::FPTRUNC_ROUND, sdl, VT, getValue(I.getArgOperand(0)),
+ DAG.getTargetConstant((int)RoundMode.getValue(), sdl,
+ TLI.getPointerTy(DAG.getDataLayout())));
+ setValue(&I, Result);
+
+ return;
+ }
case Intrinsic::fmuladd: {
EVT VT = TLI.getValueType(DAG.getDataLayout(), I.getType());
if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
diff --git a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
index 77e9e53668f9..38405f460942 100644
--- a/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/SelectionDAGDumper.cpp
@@ -231,6 +231,10 @@ std::string SDNode::getOperationName(const SelectionDAG *G) const {
case ISD::MUL: return "mul";
case ISD::MULHU: return "mulhu";
case ISD::MULHS: return "mulhs";
+ case ISD::AVGFLOORU: return "avgflooru";
+ case ISD::AVGFLOORS: return "avgfloors";
+ case ISD::AVGCEILU: return "avgceilu";
+ case ISD::AVGCEILS: return "avgceils";
case ISD::ABDS: return "abds";
case ISD::ABDU: return "abdu";
case ISD::SDIV: return "sdiv";
diff --git a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
index 72f14b456882..51f758650d87 100644
--- a/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/TargetLowering.cpp
@@ -121,7 +121,7 @@ void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
IsSwiftError = Call->paramHasAttr(ArgIdx, Attribute::SwiftError);
Alignment = Call->getParamStackAlign(ArgIdx);
IndirectType = nullptr;
- assert(IsByVal + IsPreallocated + IsInAlloca <= 1 &&
+ assert(IsByVal + IsPreallocated + IsInAlloca + IsSRet <= 1 &&
"multiple ABI attributes?");
if (IsByVal) {
IndirectType = Call->getParamByValType(ArgIdx);
@@ -132,6 +132,8 @@ void TargetLoweringBase::ArgListEntry::setAttributes(const CallBase *Call,
IndirectType = Call->getParamPreallocatedType(ArgIdx);
if (IsInAlloca)
IndirectType = Call->getParamInAllocaType(ArgIdx);
+ if (IsSRet)
+ IndirectType = Call->getParamStructRetType(ArgIdx);
}
/// Generate a libcall taking the given operands as arguments and returning a
diff --git a/llvm/lib/CodeGen/TargetLoweringBase.cpp b/llvm/lib/CodeGen/TargetLoweringBase.cpp
index ab574232e367..3a7e82c9038c 100644
--- a/llvm/lib/CodeGen/TargetLoweringBase.cpp
+++ b/llvm/lib/CodeGen/TargetLoweringBase.cpp
@@ -817,6 +817,12 @@ void TargetLoweringBase::initActions() {
setOperationAction(ISD::SUBC, VT, Expand);
setOperationAction(ISD::SUBE, VT, Expand);
+ // Halving adds
+ setOperationAction(ISD::AVGFLOORS, VT, Expand);
+ setOperationAction(ISD::AVGFLOORU, VT, Expand);
+ setOperationAction(ISD::AVGCEILS, VT, Expand);
+ setOperationAction(ISD::AVGCEILU, VT, Expand);
+
// Absolute difference
setOperationAction(ISD::ABDS, VT, Expand);
setOperationAction(ISD::ABDU, VT, Expand);
diff --git a/llvm/lib/IR/DIBuilder.cpp b/llvm/lib/IR/DIBuilder.cpp
index dc5768dd4f26..16f7072d9d04 100644
--- a/llvm/lib/IR/DIBuilder.cpp
+++ b/llvm/lib/IR/DIBuilder.cpp
@@ -293,6 +293,22 @@ DIStringType *DIBuilder::createStringType(StringRef Name, uint64_t SizeInBits) {
SizeInBits, 0);
}
+DIStringType *DIBuilder::createStringType(StringRef Name,
+ DIVariable *StringLength,
+ DIExpression *StrLocationExp) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIStringType::get(VMContext, dwarf::DW_TAG_string_type, Name,
+ StringLength, nullptr, StrLocationExp, 0, 0, 0);
+}
+
+DIStringType *DIBuilder::createStringType(StringRef Name,
+ DIExpression *StringLengthExp,
+ DIExpression *StrLocationExp) {
+ assert(!Name.empty() && "Unable to create type without name");
+ return DIStringType::get(VMContext, dwarf::DW_TAG_string_type, Name, nullptr,
+ StringLengthExp, StrLocationExp, 0, 0, 0);
+}
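A hedged usage sketch for the first new overload follows; the names DIB and LenVar are assumptions made for illustration, mirroring how a Fortran-style frontend might describe a deferred-length character type.

#include "llvm/IR/DIBuilder.h"
using namespace llvm;

// Sketch: describe a string whose length is held in the variable described
// by LenVar; no separate location expression is needed here.
static DIStringType *makeDeferredLenStringType(DIBuilder &DIB,
                                               DIVariable *LenVar) {
  return DIB.createStringType("character(*)", LenVar,
                              /*StrLocationExp=*/nullptr);
}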
+
DIDerivedType *DIBuilder::createQualifiedType(unsigned Tag, DIType *FromTy) {
return DIDerivedType::get(VMContext, Tag, "", nullptr, 0, nullptr, FromTy, 0,
0, 0, None, DINode::FlagZero);
diff --git a/llvm/lib/IR/Verifier.cpp b/llvm/lib/IR/Verifier.cpp
index 462615037189..e61358906837 100644
--- a/llvm/lib/IR/Verifier.cpp
+++ b/llvm/lib/IR/Verifier.cpp
@@ -4772,6 +4772,27 @@ void Verifier::visitIntrinsicCall(Intrinsic::ID ID, CallBase &Call) {
"an array");
break;
}
+ case Intrinsic::fptrunc_round: {
+ // Check the rounding mode
+ Metadata *MD = nullptr;
+ auto *MAV = dyn_cast<MetadataAsValue>(Call.getOperand(1));
+ if (MAV)
+ MD = MAV->getMetadata();
+
+ Assert(MD != nullptr, "missing rounding mode argument", Call);
+
+ Assert(isa<MDString>(MD),
+ ("invalid value for llvm.fptrunc.round metadata operand"
+ " (the operand should be a string)"),
+ MD);
+
+ Optional<RoundingMode> RoundMode =
+ convertStrToRoundingMode(cast<MDString>(MD)->getString());
+ Assert(RoundMode.hasValue() &&
+ RoundMode.getValue() != RoundingMode::Dynamic,
+ "unsupported rounding mode argument", Call);
+ break;
+ }
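The check above amounts to the predicate sketched below; this is a restatement for clarity rather than code from the patch, and the helper name is assumed.

#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/IR/Metadata.h"
using namespace llvm;

// Sketch: the rounding-mode operand must be an MDString that parses to a
// supported, non-dynamic rounding mode.
static bool isValidFPTruncRoundMode(const Metadata *MD) {
  const auto *Str = dyn_cast_or_null<MDString>(MD);
  if (!Str)
    return false;
  Optional<RoundingMode> RM = convertStrToRoundingMode(Str->getString());
  return RM.hasValue() && RM.getValue() != RoundingMode::Dynamic;
}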
#define INSTRUCTION(NAME, NARGS, ROUND_MODE, INTRINSIC) \
case Intrinsic::INTRINSIC:
#include "llvm/IR/ConstrainedOps.def"
diff --git a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
index bbb640cfaee8..8ec26b0a0e65 100644
--- a/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
+++ b/llvm/lib/ProfileData/ProfileSummaryBuilder.cpp
@@ -110,7 +110,13 @@ void SampleProfileSummaryBuilder::addRecord(
NumFunctions++;
if (FS.getHeadSamples() > MaxFunctionCount)
MaxFunctionCount = FS.getHeadSamples();
+ } else if (FS.getContext().hasAttribute(
+ sampleprof::ContextDuplicatedIntoBase)) {
+ // Do not recount callee samples if they are already merged into their base
+ // profiles. This can happen with CS nested profiles.
+ return;
}
+
for (const auto &I : FS.getBodySamples()) {
uint64_t Count = I.second.getSamples();
addCount(Count);
diff --git a/llvm/lib/ProfileData/SampleProf.cpp b/llvm/lib/ProfileData/SampleProf.cpp
index 9b01a386a360..5e11df6b6aad 100644
--- a/llvm/lib/ProfileData/SampleProf.cpp
+++ b/llvm/lib/ProfileData/SampleProf.cpp
@@ -531,8 +531,14 @@ void CSProfileConverter::convertProfiles(CSProfileConverter::FrameNode &Node) {
// thus done optionally. It is seen that duplicating context profiles into
// base profiles improves the code quality for thinlto build by allowing a
// profile in the prelink phase for to-be-fully-inlined functions.
- if (!NodeProfile || GenerateMergedBaseProfiles)
+ if (!NodeProfile) {
ProfileMap[ChildProfile->getContext()].merge(*ChildProfile);
+ } else if (GenerateMergedBaseProfiles) {
+ ProfileMap[ChildProfile->getContext()].merge(*ChildProfile);
+ auto &SamplesMap = NodeProfile->functionSamplesAt(ChildNode.CallSiteLoc);
+ SamplesMap[ChildProfile->getName().str()].getContext().setAttribute(
+ ContextDuplicatedIntoBase);
+ }
// Contexts coming with a `ContextShouldBeInlined` attribute indicate this
// is a preinliner-computed profile.
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
index d33888247bd2..9890b3e7820a 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -870,7 +870,6 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::SIGN_EXTEND);
setTargetDAGCombine(ISD::VECTOR_SPLICE);
setTargetDAGCombine(ISD::SIGN_EXTEND_INREG);
- setTargetDAGCombine(ISD::TRUNCATE);
setTargetDAGCombine(ISD::CONCAT_VECTORS);
setTargetDAGCombine(ISD::INSERT_SUBVECTOR);
setTargetDAGCombine(ISD::STORE);
@@ -1047,6 +1046,10 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
for (MVT VT : {MVT::v8i8, MVT::v4i16, MVT::v2i32, MVT::v16i8, MVT::v8i16,
MVT::v4i32}) {
+ setOperationAction(ISD::AVGFLOORS, VT, Legal);
+ setOperationAction(ISD::AVGFLOORU, VT, Legal);
+ setOperationAction(ISD::AVGCEILS, VT, Legal);
+ setOperationAction(ISD::AVGCEILU, VT, Legal);
setOperationAction(ISD::ABDS, VT, Legal);
setOperationAction(ISD::ABDU, VT, Legal);
}
@@ -2096,10 +2099,6 @@ const char *AArch64TargetLowering::getTargetNodeName(unsigned Opcode) const {
MAKE_CASE(AArch64ISD::FCMLTz)
MAKE_CASE(AArch64ISD::SADDV)
MAKE_CASE(AArch64ISD::UADDV)
- MAKE_CASE(AArch64ISD::SRHADD)
- MAKE_CASE(AArch64ISD::URHADD)
- MAKE_CASE(AArch64ISD::SHADD)
- MAKE_CASE(AArch64ISD::UHADD)
MAKE_CASE(AArch64ISD::SDOT)
MAKE_CASE(AArch64ISD::UDOT)
MAKE_CASE(AArch64ISD::SMINV)
@@ -4371,9 +4370,9 @@ SDValue AArch64TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
IntNo == Intrinsic::aarch64_neon_shadd);
bool IsRoundingAdd = (IntNo == Intrinsic::aarch64_neon_srhadd ||
IntNo == Intrinsic::aarch64_neon_urhadd);
- unsigned Opcode =
- IsSignedAdd ? (IsRoundingAdd ? AArch64ISD::SRHADD : AArch64ISD::SHADD)
- : (IsRoundingAdd ? AArch64ISD::URHADD : AArch64ISD::UHADD);
+ unsigned Opcode = IsSignedAdd
+ ? (IsRoundingAdd ? ISD::AVGCEILS : ISD::AVGFLOORS)
+ : (IsRoundingAdd ? ISD::AVGCEILU : ISD::AVGFLOORU);
return DAG.getNode(Opcode, dl, Op.getValueType(), Op.getOperand(1),
Op.getOperand(2));
}
@@ -14243,89 +14242,6 @@ static SDValue performANDCombine(SDNode *N,
return SDValue();
}
-// Attempt to form urhadd(OpA, OpB) from
-// truncate(vlshr(sub(zext(OpB), xor(zext(OpA), Ones(ElemSizeInBits))), 1))
-// or uhadd(OpA, OpB) from truncate(vlshr(add(zext(OpA), zext(OpB)), 1)).
-// The original form of the first expression is
-// truncate(srl(add(zext(OpB), add(zext(OpA), 1)), 1)) and the
-// (OpA + OpB + 1) subexpression will have been changed to (OpB - (~OpA)).
-// Before this function is called the srl will have been lowered to
-// AArch64ISD::VLSHR.
-// This pass can also recognize signed variants of the patterns that use sign
-// extension instead of zero extension and form a srhadd(OpA, OpB) or a
-// shadd(OpA, OpB) from them.
-static SDValue
-performVectorTruncateCombine(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
- SelectionDAG &DAG) {
- EVT VT = N->getValueType(0);
-
- // Since we are looking for a right shift by a constant value of 1 and we are
- // operating on types at least 16 bits in length (sign/zero extended OpA and
- // OpB, which are at least 8 bits), it follows that the truncate will always
- // discard the shifted-in bit and therefore the right shift will be logical
- // regardless of the signedness of OpA and OpB.
- SDValue Shift = N->getOperand(0);
- if (Shift.getOpcode() != AArch64ISD::VLSHR)
- return SDValue();
-
- // Is the right shift using an immediate value of 1?
- uint64_t ShiftAmount = Shift.getConstantOperandVal(1);
- if (ShiftAmount != 1)
- return SDValue();
-
- SDValue ExtendOpA, ExtendOpB;
- SDValue ShiftOp0 = Shift.getOperand(0);
- unsigned ShiftOp0Opc = ShiftOp0.getOpcode();
- if (ShiftOp0Opc == ISD::SUB) {
-
- SDValue Xor = ShiftOp0.getOperand(1);
- if (Xor.getOpcode() != ISD::XOR)
- return SDValue();
-
- // Is the XOR using a constant amount of all ones in the right hand side?
- uint64_t C;
- if (!isAllConstantBuildVector(Xor.getOperand(1), C))
- return SDValue();
-
- unsigned ElemSizeInBits = VT.getScalarSizeInBits();
- APInt CAsAPInt(ElemSizeInBits, C);
- if (CAsAPInt != APInt::getAllOnes(ElemSizeInBits))
- return SDValue();
-
- ExtendOpA = Xor.getOperand(0);
- ExtendOpB = ShiftOp0.getOperand(0);
- } else if (ShiftOp0Opc == ISD::ADD) {
- ExtendOpA = ShiftOp0.getOperand(0);
- ExtendOpB = ShiftOp0.getOperand(1);
- } else
- return SDValue();
-
- unsigned ExtendOpAOpc = ExtendOpA.getOpcode();
- unsigned ExtendOpBOpc = ExtendOpB.getOpcode();
- if (!(ExtendOpAOpc == ExtendOpBOpc &&
- (ExtendOpAOpc == ISD::ZERO_EXTEND || ExtendOpAOpc == ISD::SIGN_EXTEND)))
- return SDValue();
-
- // Is the result of the right shift being truncated to the same value type as
- // the original operands, OpA and OpB?
- SDValue OpA = ExtendOpA.getOperand(0);
- SDValue OpB = ExtendOpB.getOperand(0);
- EVT OpAVT = OpA.getValueType();
- assert(ExtendOpA.getValueType() == ExtendOpB.getValueType());
- if (!(VT == OpAVT && OpAVT == OpB.getValueType()))
- return SDValue();
-
- SDLoc DL(N);
- bool IsSignExtend = ExtendOpAOpc == ISD::SIGN_EXTEND;
- bool IsRHADD = ShiftOp0Opc == ISD::SUB;
- unsigned HADDOpc = IsSignExtend
- ? (IsRHADD ? AArch64ISD::SRHADD : AArch64ISD::SHADD)
- : (IsRHADD ? AArch64ISD::URHADD : AArch64ISD::UHADD);
- SDValue ResultHADD = DAG.getNode(HADDOpc, DL, VT, OpA, OpB);
-
- return ResultHADD;
-}
-
static bool hasPairwiseAdd(unsigned Opcode, EVT VT, bool FullFP16) {
switch (Opcode) {
case ISD::FADD:
@@ -14428,20 +14344,20 @@ static SDValue performConcatVectorsCombine(SDNode *N,
if (DCI.isBeforeLegalizeOps())
return SDValue();
- // Optimise concat_vectors of two [us]rhadds or [us]hadds that use extracted
- // subvectors from the same original vectors. Combine these into a single
- // [us]rhadd or [us]hadd that operates on the two original vectors. Example:
- // (v16i8 (concat_vectors (v8i8 (urhadd (extract_subvector (v16i8 OpA, <0>),
- // extract_subvector (v16i8 OpB,
- // <0>))),
- // (v8i8 (urhadd (extract_subvector (v16i8 OpA, <8>),
- // extract_subvector (v16i8 OpB,
- // <8>)))))
+ // Optimise concat_vectors of two [us]avgceils or [us]avgfloors that use
+ // extracted subvectors from the same original vectors. Combine these into a
+ // single avg that operates on the two original vectors.
+ // avgceil is the target-independent name for rhadd, avgfloor for hadd.
+ // Example:
+ // (concat_vectors (v8i8 (avgceils (extract_subvector (v16i8 OpA, <0>),
+ // extract_subvector (v16i8 OpB, <0>))),
+ // (v8i8 (avgceils (extract_subvector (v16i8 OpA, <8>),
+ // extract_subvector (v16i8 OpB, <8>)))))
// ->
- // (v16i8(urhadd(v16i8 OpA, v16i8 OpB)))
+ // (v16i8(avgceils(v16i8 OpA, v16i8 OpB)))
if (N->getNumOperands() == 2 && N0Opc == N1Opc &&
- (N0Opc == AArch64ISD::URHADD || N0Opc == AArch64ISD::SRHADD ||
- N0Opc == AArch64ISD::UHADD || N0Opc == AArch64ISD::SHADD)) {
+ (N0Opc == ISD::AVGCEILU || N0Opc == ISD::AVGCEILS ||
+ N0Opc == ISD::AVGFLOORU || N0Opc == ISD::AVGFLOORS)) {
SDValue N00 = N0->getOperand(0);
SDValue N01 = N0->getOperand(1);
SDValue N10 = N1->getOperand(0);
@@ -18022,8 +17938,6 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
return performExtendCombine(N, DCI, DAG);
case ISD::SIGN_EXTEND_INREG:
return performSignExtendInRegCombine(N, DCI, DAG);
- case ISD::TRUNCATE:
- return performVectorTruncateCombine(N, DCI, DAG);
case ISD::CONCAT_VECTORS:
return performConcatVectorsCombine(N, DCI, DAG);
case ISD::INSERT_SUBVECTOR:
diff --git a/llvm/lib/Target/AArch64/AArch64ISelLowering.h b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
index 7dfea1fbd216..9bd33bf0bcf2 100644
--- a/llvm/lib/Target/AArch64/AArch64ISelLowering.h
+++ b/llvm/lib/Target/AArch64/AArch64ISelLowering.h
@@ -230,14 +230,6 @@ enum NodeType : unsigned {
SADDV,
UADDV,
- // Vector halving addition
- SHADD,
- UHADD,
-
- // Vector rounding halving addition
- SRHADD,
- URHADD,
-
// Add Long Pairwise
SADDLP,
UADDLP,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
index 790078496462..64336c4489c4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.cpp
@@ -1547,27 +1547,6 @@ findCondCodeUseOperandIdxForBranchOrSelect(const MachineInstr &Instr) {
}
}
-namespace {
-
-struct UsedNZCV {
- bool N = false;
- bool Z = false;
- bool C = false;
- bool V = false;
-
- UsedNZCV() = default;
-
- UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
- this->N |= UsedFlags.N;
- this->Z |= UsedFlags.Z;
- this->C |= UsedFlags.C;
- this->V |= UsedFlags.V;
- return *this;
- }
-};
-
-} // end anonymous namespace
-
/// Find a condition code used by the instruction.
/// Returns AArch64CC::Invalid if either the instruction does not use condition
/// codes or we don't optimize CmpInstr in the presence of such instructions.
@@ -1622,15 +1601,15 @@ static UsedNZCV getUsedNZCV(AArch64CC::CondCode CC) {
return UsedFlags;
}
-/// \returns Conditions flags used after \p CmpInstr in its MachineBB if they
-/// are not containing C or V flags and NZCV flags are not alive in successors
-/// of the same \p CmpInstr and \p MI parent. \returns None otherwise.
+/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
+/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
+/// \returns None otherwise.
///
/// Collect instructions using those flags in \p CCUseInstrs if provided.
-static Optional<UsedNZCV>
-examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
- const TargetRegisterInfo &TRI,
- SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr) {
+Optional<UsedNZCV>
+llvm::examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
+ const TargetRegisterInfo &TRI,
+ SmallVectorImpl<MachineInstr *> *CCUseInstrs) {
MachineBasicBlock *CmpParent = CmpInstr.getParent();
if (MI.getParent() != CmpParent)
return None;
@@ -1652,8 +1631,6 @@ examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
if (Instr.modifiesRegister(AArch64::NZCV, &TRI))
break;
}
- if (NZCVUsedAfterCmp.C || NZCVUsedAfterCmp.V)
- return None;
return NZCVUsedAfterCmp;
}
@@ -1684,7 +1661,8 @@ static bool canInstrSubstituteCmpInstr(MachineInstr &MI, MachineInstr &CmpInstr,
if (!isADDSRegImm(CmpOpcode) && !isSUBSRegImm(CmpOpcode))
return false;
- if (!examineCFlagsUse(MI, CmpInstr, TRI))
+ Optional<UsedNZCV> NZVCUsed = examineCFlagsUse(MI, CmpInstr, TRI);
+ if (!NZVCUsed || NZVCUsed->C || NZVCUsed->V)
return false;
AccessKind AccessToCheck = AK_Write;
@@ -1773,7 +1751,7 @@ static bool canCmpInstrBeRemoved(MachineInstr &MI, MachineInstr &CmpInstr,
examineCFlagsUse(MI, CmpInstr, TRI, &CCUseInstrs);
// Condition flags are not used in CmpInstr basic block successors and only
// Z or N flags are allowed to be used after CmpInstr within its basic block
- if (!NZCVUsedAfterCmp)
+ if (!NZCVUsedAfterCmp || NZCVUsedAfterCmp->C || NZCVUsedAfterCmp->V)
return false;
// Z or N flag used after CmpInstr must correspond to the flag used in MI
if ((MIUsedNZCV.Z && NZCVUsedAfterCmp->N) ||
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.h b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
index 1054bea40e68..0da812b1363c 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.h
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.h
@@ -356,6 +356,33 @@ private:
const MachineRegisterInfo *MRI) const;
};
+struct UsedNZCV {
+ bool N = false;
+ bool Z = false;
+ bool C = false;
+ bool V = false;
+
+ UsedNZCV() = default;
+
+ UsedNZCV &operator|=(const UsedNZCV &UsedFlags) {
+ this->N |= UsedFlags.N;
+ this->Z |= UsedFlags.Z;
+ this->C |= UsedFlags.C;
+ this->V |= UsedFlags.V;
+ return *this;
+ }
+};
+
+/// \returns Condition flags used after \p CmpInstr in its MachineBB if NZCV
+/// flags are not alive in successors of the same \p CmpInstr and \p MI parent.
+/// \returns None otherwise.
+///
+/// Collect instructions using those flags in \p CCUseInstrs if provided.
+Optional<UsedNZCV>
+examineCFlagsUse(MachineInstr &MI, MachineInstr &CmpInstr,
+ const TargetRegisterInfo &TRI,
+ SmallVectorImpl<MachineInstr *> *CCUseInstrs = nullptr);
+
/// Return true if there is an instruction /after/ \p DefMI and before \p UseMI
/// which either reads or clobbers NZCV.
bool isNZCVTouchedInInstructionRange(const MachineInstr &DefMI,
diff --git a/llvm/lib/Target/AArch64/AArch64InstrInfo.td b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
index 788043b916f0..17c11f8bbca4 100644
--- a/llvm/lib/Target/AArch64/AArch64InstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64InstrInfo.td
@@ -630,11 +630,6 @@ def AArch64uminv : SDNode<"AArch64ISD::UMINV", SDT_AArch64UnaryVec>;
def AArch64smaxv : SDNode<"AArch64ISD::SMAXV", SDT_AArch64UnaryVec>;
def AArch64umaxv : SDNode<"AArch64ISD::UMAXV", SDT_AArch64UnaryVec>;
-def AArch64srhadd : SDNode<"AArch64ISD::SRHADD", SDT_AArch64binvec>;
-def AArch64urhadd : SDNode<"AArch64ISD::URHADD", SDT_AArch64binvec>;
-def AArch64shadd : SDNode<"AArch64ISD::SHADD", SDT_AArch64binvec>;
-def AArch64uhadd : SDNode<"AArch64ISD::UHADD", SDT_AArch64binvec>;
-
def AArch64uabd : PatFrags<(ops node:$lhs, node:$rhs),
[(abdu node:$lhs, node:$rhs),
(int_aarch64_neon_uabd node:$lhs, node:$rhs)]>;
@@ -4488,7 +4483,7 @@ defm PMUL : SIMDThreeSameVectorB<1, 0b10011, "pmul", int_aarch64_neon_pmul>;
defm SABA : SIMDThreeSameVectorBHSTied<0, 0b01111, "saba",
TriOpFrag<(add node:$LHS, (AArch64sabd node:$MHS, node:$RHS))> >;
defm SABD : SIMDThreeSameVectorBHS<0,0b01110,"sabd", AArch64sabd>;
-defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", AArch64shadd>;
+defm SHADD : SIMDThreeSameVectorBHS<0,0b00000,"shadd", avgfloors>;
defm SHSUB : SIMDThreeSameVectorBHS<0,0b00100,"shsub", int_aarch64_neon_shsub>;
defm SMAXP : SIMDThreeSameVectorBHS<0,0b10100,"smaxp", int_aarch64_neon_smaxp>;
defm SMAX : SIMDThreeSameVectorBHS<0,0b01100,"smax", smax>;
@@ -4500,14 +4495,14 @@ defm SQRDMULH : SIMDThreeSameVectorHS<1,0b10110,"sqrdmulh",int_aarch64_neon_sqrd
defm SQRSHL : SIMDThreeSameVector<0,0b01011,"sqrshl", int_aarch64_neon_sqrshl>;
defm SQSHL : SIMDThreeSameVector<0,0b01001,"sqshl", int_aarch64_neon_sqshl>;
defm SQSUB : SIMDThreeSameVector<0,0b00101,"sqsub", int_aarch64_neon_sqsub>;
-defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", AArch64srhadd>;
+defm SRHADD : SIMDThreeSameVectorBHS<0,0b00010,"srhadd", avgceils>;
defm SRSHL : SIMDThreeSameVector<0,0b01010,"srshl", int_aarch64_neon_srshl>;
defm SSHL : SIMDThreeSameVector<0,0b01000,"sshl", int_aarch64_neon_sshl>;
defm SUB : SIMDThreeSameVector<1,0b10000,"sub", sub>;
defm UABA : SIMDThreeSameVectorBHSTied<1, 0b01111, "uaba",
TriOpFrag<(add node:$LHS, (AArch64uabd node:$MHS, node:$RHS))> >;
defm UABD : SIMDThreeSameVectorBHS<1,0b01110,"uabd", AArch64uabd>;
-defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", AArch64uhadd>;
+defm UHADD : SIMDThreeSameVectorBHS<1,0b00000,"uhadd", avgflooru>;
defm UHSUB : SIMDThreeSameVectorBHS<1,0b00100,"uhsub", int_aarch64_neon_uhsub>;
defm UMAXP : SIMDThreeSameVectorBHS<1,0b10100,"umaxp", int_aarch64_neon_umaxp>;
defm UMAX : SIMDThreeSameVectorBHS<1,0b01100,"umax", umax>;
@@ -4517,7 +4512,7 @@ defm UQADD : SIMDThreeSameVector<1,0b00001,"uqadd", int_aarch64_neon_uqadd>;
defm UQRSHL : SIMDThreeSameVector<1,0b01011,"uqrshl", int_aarch64_neon_uqrshl>;
defm UQSHL : SIMDThreeSameVector<1,0b01001,"uqshl", int_aarch64_neon_uqshl>;
defm UQSUB : SIMDThreeSameVector<1,0b00101,"uqsub", int_aarch64_neon_uqsub>;
-defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", AArch64urhadd>;
+defm URHADD : SIMDThreeSameVectorBHS<1,0b00010,"urhadd", avgceilu>;
defm URSHL : SIMDThreeSameVector<1,0b01010,"urshl", int_aarch64_neon_urshl>;
defm USHL : SIMDThreeSameVector<1,0b01000,"ushl", int_aarch64_neon_ushl>;
defm SQRDMLAH : SIMDThreeSameVectorSQRDMLxHTiedHS<1,0b10000,"sqrdmlah",
diff --git a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
index 1fc5617b49f6..6b593274ab2f 100644
--- a/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
+++ b/llvm/lib/Target/AArch64/AArch64MIPeepholeOpt.cpp
@@ -60,12 +60,13 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
MachineLoopInfo *MLI;
MachineRegisterInfo *MRI;
+ using OpcodePair = std::pair<unsigned, unsigned>;
template <typename T>
using SplitAndOpcFunc =
- std::function<Optional<unsigned>(T, unsigned, T &, T &)>;
+ std::function<Optional<OpcodePair>(T, unsigned, T &, T &)>;
using BuildMIFunc =
- std::function<void(MachineInstr &, unsigned, unsigned, unsigned, Register,
- Register, Register)>;
+ std::function<void(MachineInstr &, OpcodePair, unsigned, unsigned,
+ Register, Register, Register)>;
/// For instructions where an immediate operand could be split into two
/// separate immediate instructions, use splitTwoPartImm to handle the
@@ -93,6 +94,10 @@ struct AArch64MIPeepholeOpt : public MachineFunctionPass {
bool visitADDSUB(unsigned PosOpc, unsigned NegOpc, MachineInstr &MI,
SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
template <typename T>
+ bool visitADDSSUBS(OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
+
+ template <typename T>
bool visitAND(unsigned Opc, MachineInstr &MI,
SmallSetVector<MachineInstr *, 8> &ToBeRemoved);
bool visitORR(MachineInstr &MI,
@@ -171,20 +176,20 @@ bool AArch64MIPeepholeOpt::visitAND(
return splitTwoPartImm<T>(
MI, ToBeRemoved,
- [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<unsigned> {
+ [Opc](T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
if (splitBitmaskImm(Imm, RegSize, Imm0, Imm1))
- return Opc;
+ return std::make_pair(Opc, Opc);
return None;
},
- [&TII = TII](MachineInstr &MI, unsigned Opcode, unsigned Imm0,
+ [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
unsigned Imm1, Register SrcReg, Register NewTmpReg,
Register NewDstReg) {
DebugLoc DL = MI.getDebugLoc();
MachineBasicBlock *MBB = MI.getParent();
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg)
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
.addReg(SrcReg)
.addImm(Imm0);
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewDstReg)
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
.addReg(NewTmpReg)
.addImm(Imm1);
});
@@ -273,23 +278,64 @@ bool AArch64MIPeepholeOpt::visitADDSUB(
return splitTwoPartImm<T>(
MI, ToBeRemoved,
[PosOpc, NegOpc](T Imm, unsigned RegSize, T &Imm0,
- T &Imm1) -> Optional<unsigned> {
+ T &Imm1) -> Optional<OpcodePair> {
if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
- return PosOpc;
+ return std::make_pair(PosOpc, PosOpc);
if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
- return NegOpc;
+ return std::make_pair(NegOpc, NegOpc);
return None;
},
- [&TII = TII](MachineInstr &MI, unsigned Opcode, unsigned Imm0,
+ [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
+ unsigned Imm1, Register SrcReg, Register NewTmpReg,
+ Register NewDstReg) {
+ DebugLoc DL = MI.getDebugLoc();
+ MachineBasicBlock *MBB = MI.getParent();
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
+ .addReg(SrcReg)
+ .addImm(Imm0)
+ .addImm(12);
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
+ .addReg(NewTmpReg)
+ .addImm(Imm1)
+ .addImm(0);
+ });
+}
+
+template <typename T>
+bool AArch64MIPeepholeOpt::visitADDSSUBS(
+ OpcodePair PosOpcs, OpcodePair NegOpcs, MachineInstr &MI,
+ SmallSetVector<MachineInstr *, 8> &ToBeRemoved) {
+ // Try the same transformation as ADDSUB, but with the additional requirement
+ // that the condition-code uses are only for Equal and Not Equal
+ return splitTwoPartImm<T>(
+ MI, ToBeRemoved,
+ [PosOpcs, NegOpcs, &MI, &TRI = TRI, &MRI = MRI](
+ T Imm, unsigned RegSize, T &Imm0, T &Imm1) -> Optional<OpcodePair> {
+ OpcodePair OP;
+ if (splitAddSubImm(Imm, RegSize, Imm0, Imm1))
+ OP = PosOpcs;
+ else if (splitAddSubImm(-Imm, RegSize, Imm0, Imm1))
+ OP = NegOpcs;
+ else
+ return None;
+ // Check the condition-code uses last, since scanning the following
+ // instructions is expensive
+ MachineInstr &SrcMI = *MRI->getUniqueVRegDef(MI.getOperand(1).getReg());
+ Optional<UsedNZCV> NZCVUsed = examineCFlagsUse(SrcMI, MI, *TRI);
+ if (!NZCVUsed || NZCVUsed->C || NZCVUsed->V)
+ return None;
+ return OP;
+ },
+ [&TII = TII](MachineInstr &MI, OpcodePair Opcode, unsigned Imm0,
unsigned Imm1, Register SrcReg, Register NewTmpReg,
Register NewDstReg) {
DebugLoc DL = MI.getDebugLoc();
MachineBasicBlock *MBB = MI.getParent();
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewTmpReg)
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.first), NewTmpReg)
.addReg(SrcReg)
.addImm(Imm0)
.addImm(12);
- BuildMI(*MBB, MI, DL, TII->get(Opcode), NewDstReg)
+ BuildMI(*MBB, MI, DL, TII->get(Opcode.second), NewDstReg)
.addReg(NewTmpReg)
.addImm(Imm1)
.addImm(0);
@@ -357,32 +403,49 @@ bool AArch64MIPeepholeOpt::splitTwoPartImm(
// number since it was sign extended when we assign to the 64-bit Imm.
if (SubregToRegMI)
Imm &= 0xFFFFFFFF;
- unsigned Opcode;
+ OpcodePair Opcode;
if (auto R = SplitAndOpc(Imm, RegSize, Imm0, Imm1))
Opcode = R.getValue();
else
return false;
- // Create new ADD/SUB MIs.
+ // Create new MIs using the first and second opcodes. Opcodes might differ for
+ // flag-setting operations that should only set flags on the second instruction.
+ // NewTmpReg = Opcode.first SrcReg Imm0
+ // NewDstReg = Opcode.second NewTmpReg Imm1
+
+ // Determine register classes for destinations and register operands
MachineFunction *MF = MI.getMF();
- const TargetRegisterClass *RC =
- TII->getRegClass(TII->get(Opcode), 0, TRI, *MF);
- const TargetRegisterClass *ORC =
- TII->getRegClass(TII->get(Opcode), 1, TRI, *MF);
+ const TargetRegisterClass *FirstInstrDstRC =
+ TII->getRegClass(TII->get(Opcode.first), 0, TRI, *MF);
+ const TargetRegisterClass *FirstInstrOperandRC =
+ TII->getRegClass(TII->get(Opcode.first), 1, TRI, *MF);
+ const TargetRegisterClass *SecondInstrDstRC =
+ (Opcode.first == Opcode.second)
+ ? FirstInstrDstRC
+ : TII->getRegClass(TII->get(Opcode.second), 0, TRI, *MF);
+ const TargetRegisterClass *SecondInstrOperandRC =
+ (Opcode.first == Opcode.second)
+ ? FirstInstrOperandRC
+ : TII->getRegClass(TII->get(Opcode.second), 1, TRI, *MF);
+
+ // Get the old destination and source registers and create the new destinations
Register DstReg = MI.getOperand(0).getReg();
Register SrcReg = MI.getOperand(1).getReg();
- Register NewTmpReg = MRI->createVirtualRegister(RC);
- Register NewDstReg = MRI->createVirtualRegister(RC);
+ Register NewTmpReg = MRI->createVirtualRegister(FirstInstrDstRC);
+ Register NewDstReg = MRI->createVirtualRegister(SecondInstrDstRC);
- MRI->constrainRegClass(SrcReg, RC);
- MRI->constrainRegClass(NewTmpReg, ORC);
+ // Constrain registers based on their new uses
+ MRI->constrainRegClass(SrcReg, FirstInstrOperandRC);
+ MRI->constrainRegClass(NewTmpReg, SecondInstrOperandRC);
MRI->constrainRegClass(NewDstReg, MRI->getRegClass(DstReg));
+ // Call the delegating operation to build the instruction
BuildInstr(MI, Opcode, Imm0, Imm1, SrcReg, NewTmpReg, NewDstReg);
- MRI->replaceRegWith(DstReg, NewDstReg);
// replaceRegWith changes MI's definition register. Keep it for SSA form until
// deleting MI.
+ MRI->replaceRegWith(DstReg, NewDstReg);
MI.getOperand(0).setReg(DstReg);
// Record the MIs need to be removed.
@@ -439,6 +502,26 @@ bool AArch64MIPeepholeOpt::runOnMachineFunction(MachineFunction &MF) {
Changed = visitADDSUB<uint64_t>(AArch64::SUBXri, AArch64::ADDXri, MI,
ToBeRemoved);
break;
+ case AArch64::ADDSWrr:
+ Changed = visitADDSSUBS<uint32_t>({AArch64::ADDWri, AArch64::ADDSWri},
+ {AArch64::SUBWri, AArch64::SUBSWri},
+ MI, ToBeRemoved);
+ break;
+ case AArch64::SUBSWrr:
+ Changed = visitADDSSUBS<uint32_t>({AArch64::SUBWri, AArch64::SUBSWri},
+ {AArch64::ADDWri, AArch64::ADDSWri},
+ MI, ToBeRemoved);
+ break;
+ case AArch64::ADDSXrr:
+ Changed = visitADDSSUBS<uint64_t>({AArch64::ADDXri, AArch64::ADDSXri},
+ {AArch64::SUBXri, AArch64::SUBSXri},
+ MI, ToBeRemoved);
+ break;
+ case AArch64::SUBSXrr:
+ Changed = visitADDSSUBS<uint64_t>({AArch64::SUBXri, AArch64::SUBSXri},
+ {AArch64::ADDXri, AArch64::ADDSXri},
+ MI, ToBeRemoved);
+ break;
}
}
}
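The immediate split that visitADDSUB and the new visitADDSSUBS both rely on can be sketched as below. This is an assumed restatement of splitAddSubImm, which lives elsewhere in this file: the constant must fit in 24 bits with both 12-bit halves non-zero, so for example 0x123456 becomes a high chunk 0x123 (emitted with LSL #12) followed by a low chunk 0x456, and for the flag-setting variants only the second instruction uses the S-form opcode.

#include <cstdint>
#include <optional>
#include <utility>

// Sketch only: split a 24-bit immediate into two 12-bit chunks. Returns
// {high, low} so that Imm == (high << 12) + low, or nullopt when the value
// is encodable by a single instruction or too wide to split.
static std::optional<std::pair<uint32_t, uint32_t>> splitImm24(uint32_t Imm) {
  uint32_t Hi = (Imm >> 12) & 0xfff;
  uint32_t Lo = Imm & 0xfff;
  if (Imm > 0xffffff || Hi == 0 || Lo == 0)
    return std::nullopt;
  return std::make_pair(Hi, Lo);
}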
diff --git a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
index 8b653e23be8f..59037440c2b9 100644
--- a/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
+++ b/llvm/lib/Target/AArch64/AArch64StackTagging.cpp
@@ -290,14 +290,6 @@ public:
};
class AArch64StackTagging : public FunctionPass {
- struct AllocaInfo {
- AllocaInst *AI;
- TrackingVH<Instruction> OldAI; // Track through RAUW to replace debug uses.
- SmallVector<IntrinsicInst *, 2> LifetimeStart;
- SmallVector<IntrinsicInst *, 2> LifetimeEnd;
- SmallVector<DbgVariableIntrinsic *, 2> DbgVariableIntrinsics;
- };
-
const bool MergeInit;
const bool UseStackSafety;
@@ -313,7 +305,7 @@ public:
}
bool isInterestingAlloca(const AllocaInst &AI);
- void alignAndPadAlloca(AllocaInfo &Info);
+ void alignAndPadAlloca(memtag::AllocaInfo &Info);
void tagAlloca(AllocaInst *AI, Instruction *InsertBefore, Value *Ptr,
uint64_t Size);
@@ -322,9 +314,9 @@ public:
Instruction *collectInitializers(Instruction *StartInst, Value *StartPtr,
uint64_t Size, InitializerBuilder &IB);
- Instruction *
- insertBaseTaggedPointer(const MapVector<AllocaInst *, AllocaInfo> &Allocas,
- const DominatorTree *DT);
+ Instruction *insertBaseTaggedPointer(
+ const MapVector<AllocaInst *, memtag::AllocaInfo> &Allocas,
+ const DominatorTree *DT);
bool runOnFunction(Function &F) override;
StringRef getPassName() const override { return "AArch64 Stack Tagging"; }
@@ -466,12 +458,12 @@ void AArch64StackTagging::untagAlloca(AllocaInst *AI, Instruction *InsertBefore,
}
Instruction *AArch64StackTagging::insertBaseTaggedPointer(
- const MapVector<AllocaInst *, AllocaInfo> &InterestingAllocas,
+ const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument,
const DominatorTree *DT) {
BasicBlock *PrologueBB = nullptr;
// Try sinking IRG as deep as possible to avoid hurting shrink wrap.
- for (auto &I : InterestingAllocas) {
- const AllocaInfo &Info = I.second;
+ for (auto &I : AllocasToInstrument) {
+ const memtag::AllocaInfo &Info = I.second;
AllocaInst *AI = Info.AI;
if (!PrologueBB) {
PrologueBB = AI->getParent();
@@ -490,7 +482,7 @@ Instruction *AArch64StackTagging::insertBaseTaggedPointer(
return Base;
}
-void AArch64StackTagging::alignAndPadAlloca(AllocaInfo &Info) {
+void AArch64StackTagging::alignAndPadAlloca(memtag::AllocaInfo &Info) {
const Align NewAlignment =
max(MaybeAlign(Info.AI->getAlign()), kTagGranuleSize);
Info.AI->setAlignment(NewAlignment);
@@ -536,63 +528,17 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
if (MergeInit)
AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
- MapVector<AllocaInst *, AllocaInfo>
- InterestingAllocas; // need stable iteration order
- SmallVector<Instruction *, 8> RetVec;
- SmallVector<Instruction *, 4> UnrecognizedLifetimes;
-
- bool CallsReturnTwice = false;
- for (Instruction &I : instructions(F)) {
- if (CallInst *CI = dyn_cast<CallInst>(&I)) {
- if (CI->canReturnTwice()) {
- CallsReturnTwice = true;
- }
- }
- if (auto *AI = dyn_cast<AllocaInst>(&I)) {
- if (isInterestingAlloca(*AI)) {
- InterestingAllocas[AI].AI = AI;
- InterestingAllocas[AI].OldAI = AI;
- }
- continue;
- }
-
- if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&I)) {
- for (Value *V : DVI->location_ops())
- if (auto *AI = dyn_cast_or_null<AllocaInst>(V))
- if (isInterestingAlloca(*AI) &&
- (InterestingAllocas[AI].DbgVariableIntrinsics.empty() ||
- InterestingAllocas[AI].DbgVariableIntrinsics.back() != DVI))
- InterestingAllocas[AI].DbgVariableIntrinsics.push_back(DVI);
- continue;
- }
-
- auto *II = dyn_cast<IntrinsicInst>(&I);
- if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end)) {
- AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
- if (!AI) {
- UnrecognizedLifetimes.push_back(&I);
- continue;
- }
- if (!isInterestingAlloca(*AI))
- continue;
- if (II->getIntrinsicID() == Intrinsic::lifetime_start)
- InterestingAllocas[AI].LifetimeStart.push_back(II);
- else
- InterestingAllocas[AI].LifetimeEnd.push_back(II);
- continue;
- }
-
- Instruction *ExitUntag = getUntagLocationIfFunctionExit(I);
- if (ExitUntag)
- RetVec.push_back(ExitUntag);
- }
+ memtag::StackInfoBuilder SIB(
+ [this](const AllocaInst &AI) { return isInterestingAlloca(AI); });
+ for (Instruction &I : instructions(F))
+ SIB.visit(I);
+ memtag::StackInfo &SInfo = SIB.get();
- if (InterestingAllocas.empty())
+ if (SInfo.AllocasToInstrument.empty())
return false;
- for (auto &I : InterestingAllocas) {
- AllocaInfo &Info = I.second;
+ for (auto &I : SInfo.AllocasToInstrument) {
+ memtag::AllocaInfo &Info = I.second;
assert(Info.AI && isInterestingAlloca(*Info.AI));
alignAndPadAlloca(Info);
}
@@ -602,7 +548,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
if (auto *P = getAnalysisIfAvailable<DominatorTreeWrapperPass>())
DT = &P->getDomTree();
- if (DT == nullptr && (InterestingAllocas.size() > 1 ||
+ if (DT == nullptr && (SInfo.AllocasToInstrument.size() > 1 ||
!F->hasFnAttribute(Attribute::OptimizeNone))) {
DeleteDT = std::make_unique<DominatorTree>(*F);
DT = DeleteDT.get();
@@ -621,11 +567,11 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
SetTagFunc =
Intrinsic::getDeclaration(F->getParent(), Intrinsic::aarch64_settag);
- Instruction *Base = insertBaseTaggedPointer(InterestingAllocas, DT);
+ Instruction *Base = insertBaseTaggedPointer(SInfo.AllocasToInstrument, DT);
int NextTag = 0;
- for (auto &I : InterestingAllocas) {
- const AllocaInfo &Info = I.second;
+ for (auto &I : SInfo.AllocasToInstrument) {
+ const memtag::AllocaInfo &Info = I.second;
AllocaInst *AI = Info.AI;
int Tag = NextTag;
NextTag = (NextTag + 1) % 16;
@@ -642,15 +588,15 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
TagPCall->setOperand(0, Info.AI);
bool StandardLifetime =
- UnrecognizedLifetimes.empty() &&
- isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT,
- ClMaxLifetimes);
+ SInfo.UnrecognizedLifetimes.empty() &&
+ memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, DT,
+ ClMaxLifetimes);
// Calls to functions that may return twice (e.g. setjmp) confuse the
// postdominator analysis, and will leave us to keep memory tagged after
// function return. Work around this by always untagging at every return
// statement if return_twice functions are called.
- if (UnrecognizedLifetimes.empty() && StandardLifetime &&
- !CallsReturnTwice) {
+ if (SInfo.UnrecognizedLifetimes.empty() && StandardLifetime &&
+ !SInfo.CallsReturnTwice) {
IntrinsicInst *Start = Info.LifetimeStart[0];
uint64_t Size =
cast<ConstantInt>(Start->getArgOperand(0))->getZExtValue();
@@ -659,8 +605,8 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
auto TagEnd = [&](Instruction *Node) { untagAlloca(AI, Node, Size); };
if (!DT || !PDT ||
- !forAllReachableExits(*DT, *PDT, Start, Info.LifetimeEnd, RetVec,
- TagEnd)) {
+ !memtag::forAllReachableExits(*DT, *PDT, Start, Info.LifetimeEnd,
+ SInfo.RetVec, TagEnd)) {
for (auto *End : Info.LifetimeEnd)
End->eraseFromParent();
}
@@ -668,7 +614,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
uint64_t Size = Info.AI->getAllocationSizeInBits(*DL).getValue() / 8;
Value *Ptr = IRB.CreatePointerCast(TagPCall, IRB.getInt8PtrTy());
tagAlloca(AI, &*IRB.GetInsertPoint(), Ptr, Size);
- for (auto &RI : RetVec) {
+ for (auto &RI : SInfo.RetVec) {
untagAlloca(AI, RI, Size);
}
// We may have inserted tag/untag outside of any lifetime interval.
@@ -686,7 +632,7 @@ bool AArch64StackTagging::runOnFunction(Function &Fn) {
// If we have instrumented at least one alloca, all unrecognized lifetime
// intrinsics have to go.
- for (auto &I : UnrecognizedLifetimes)
+ for (auto &I : SInfo.UnrecognizedLifetimes)
I->eraseFromParent();
return true;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPU.td b/llvm/lib/Target/AMDGPU/AMDGPU.td
index 806c0b18637a..1fb51a54826e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPU.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPU.td
@@ -86,6 +86,12 @@ def FeatureScalarFlatScratchInsts : SubtargetFeature<"scalar-flat-scratch-insts"
"Have s_scratch_* flat memory instructions"
>;
+def FeatureEnableFlatScratch : SubtargetFeature<"enable-flat-scratch",
+ "EnableFlatScratch",
+ "true",
+ "Use scratch_* flat memory instructions to access scratch"
+>;
+
def FeatureAddNoCarryInsts : SubtargetFeature<"add-no-carry-insts",
"AddNoCarryInsts",
"true",
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributes.def b/llvm/lib/Target/AMDGPU/AMDGPUAttributes.def
index a7fa71c016fc..fb9e17fb63e6 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributes.def
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributes.def
@@ -18,6 +18,7 @@ AMDGPU_ATTRIBUTE(DISPATCH_PTR, "amdgpu-no-dispatch-ptr")
AMDGPU_ATTRIBUTE(QUEUE_PTR, "amdgpu-no-queue-ptr")
AMDGPU_ATTRIBUTE(DISPATCH_ID, "amdgpu-no-dispatch-id")
AMDGPU_ATTRIBUTE(IMPLICIT_ARG_PTR, "amdgpu-no-implicitarg-ptr")
+AMDGPU_ATTRIBUTE(HOSTCALL_PTR, "amdgpu-no-hostcall-ptr")
AMDGPU_ATTRIBUTE(WORKGROUP_ID_X, "amdgpu-no-workgroup-id-x")
AMDGPU_ATTRIBUTE(WORKGROUP_ID_Y, "amdgpu-no-workgroup-id-y")
AMDGPU_ATTRIBUTE(WORKGROUP_ID_Z, "amdgpu-no-workgroup-id-z")
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
index bd913b88d759..d398d0a2bb28 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUAttributor.cpp
@@ -12,6 +12,7 @@
#include "AMDGPU.h"
#include "GCNSubtarget.h"
+#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsR600.h"
@@ -102,7 +103,7 @@ static bool isDSAddress(const Constant *C) {
/// Returns true if the function requires the implicit argument be passed
/// regardless of the function contents.
-static bool funcRequiresImplicitArgPtr(const Function &F) {
+static bool funcRequiresHostcallPtr(const Function &F) {
// Sanitizers require the hostcall buffer passed in the implicit arguments.
return F.hasFnAttribute(Attribute::SanitizeAddress) ||
F.hasFnAttribute(Attribute::SanitizeThread) ||
@@ -341,12 +342,15 @@ struct AAAMDAttributesFunction : public AAAMDAttributes {
// If the function requires the implicit arg pointer due to sanitizers,
// assume it's needed even if explicitly marked as not requiring it.
- const bool NeedsImplicit = funcRequiresImplicitArgPtr(*F);
- if (NeedsImplicit)
+ const bool NeedsHostcall = funcRequiresHostcallPtr(*F);
+ if (NeedsHostcall) {
removeAssumedBits(IMPLICIT_ARG_PTR);
+ removeAssumedBits(HOSTCALL_PTR);
+ }
for (auto Attr : ImplicitAttrs) {
- if (NeedsImplicit && Attr.first == IMPLICIT_ARG_PTR)
+ if (NeedsHostcall &&
+ (Attr.first == IMPLICIT_ARG_PTR || Attr.first == HOSTCALL_PTR))
continue;
if (F->hasFnAttribute(Attr.second))
@@ -405,6 +409,11 @@ struct AAAMDAttributesFunction : public AAAMDAttributes {
removeAssumedBits(QUEUE_PTR);
}
+ if (funcRetrievesHostcallPtr(A)) {
+ removeAssumedBits(IMPLICIT_ARG_PTR);
+ removeAssumedBits(HOSTCALL_PTR);
+ }
+
return getAssumed() != OrigAssumed ? ChangeStatus::CHANGED
: ChangeStatus::UNCHANGED;
}
@@ -486,6 +495,35 @@ private:
return false;
}
+
+ bool funcRetrievesHostcallPtr(Attributor &A) {
+ auto Pos = llvm::AMDGPU::getHostcallImplicitArgPosition();
+
+ // Check whether this is a call to the implicitarg_ptr intrinsic whose
+ // result is used to retrieve the hostcall pointer. The hostcall implicit
+ // argument is considered unused only if every use of the implicitarg_ptr
+ // is a load that clearly does not retrieve any byte of the hostcall
+ // pointer. We check this by tracing all the uses of the initial call to
+ // the implicitarg_ptr intrinsic.
+ auto DoesNotLeadToHostcallPtr = [&](Instruction &I) {
+ auto &Call = cast<CallBase>(I);
+ if (Call.getIntrinsicID() != Intrinsic::amdgcn_implicitarg_ptr)
+ return true;
+
+ const auto &PointerInfoAA = A.getAAFor<AAPointerInfo>(
+ *this, IRPosition::callsite_returned(Call), DepClassTy::REQUIRED);
+
+ AAPointerInfo::OffsetAndSize OAS(Pos, 8);
+ return PointerInfoAA.forallInterferingAccesses(
+ OAS, [](const AAPointerInfo::Access &Acc, bool IsExact) {
+ return Acc.getRemoteInst()->isDroppable();
+ });
+ };
+
+ bool UsedAssumedInformation = false;
+ return !A.checkForAllCallLikeInstructions(DoesNotLeadToHostcallPtr, *this,
+ UsedAssumedInformation);
+ }
};
AAAMDAttributes &AAAMDAttributes::createForPosition(const IRPosition &IRP,
@@ -638,7 +676,7 @@ public:
AMDGPUInformationCache InfoCache(M, AG, Allocator, nullptr, *TM);
DenseSet<const char *> Allowed(
{&AAAMDAttributes::ID, &AAUniformWorkGroupSize::ID,
- &AAAMDFlatWorkGroupSize::ID, &AACallEdges::ID});
+ &AAAMDFlatWorkGroupSize::ID, &AACallEdges::ID, &AAPointerInfo::ID});
Attributor A(Functions, InfoCache, CGUpdater, &Allowed);
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
index 7fd94a977be7..786fc54c466c 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
+++ b/llvm/lib/Target/AMDGPU/AMDGPUGISel.td
@@ -222,6 +222,9 @@ def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_FMAX, SIbuffer_atomic_fmax>;
def : GINodeEquiv<G_AMDGPU_BUFFER_ATOMIC_CMPSWAP, SIbuffer_atomic_cmpswap>;
def : GINodeEquiv<G_AMDGPU_S_BUFFER_LOAD, SIsbuffer_load>;
+def : GINodeEquiv<G_FPTRUNC_ROUND_UPWARD, SIfptrunc_round_upward>;
+def : GINodeEquiv<G_FPTRUNC_ROUND_DOWNWARD, SIfptrunc_round_downward>;
+
class GISelSop2Pat <
SDPatternOperator node,
Instruction inst,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
index f5018e3a19ac..8cc5c1345b0f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUHSAMetadataStreamer.cpp
@@ -405,7 +405,7 @@ void MetadataStreamerV2::emitHiddenKernelArgs(const Function &Func,
if (HiddenArgNumBytes >= 32) {
if (Func.getParent()->getNamedMetadata("llvm.printf.fmts"))
emitKernelArg(DL, Int8PtrTy, Align(8), ValueKind::HiddenPrintfBuffer);
- else if (Func.getParent()->getFunction("__ockl_hostcall_internal")) {
+ else if (!Func.hasFnAttribute("amdgpu-no-hostcall-ptr")) {
// The printf runtime binding pass should have ensured that hostcall and
// printf are not used in the same module.
assert(!Func.getParent()->getNamedMetadata("llvm.printf.fmts"));
@@ -794,6 +794,7 @@ void MetadataStreamerV3::emitHiddenKernelArgs(const MachineFunction &MF,
msgpack::ArrayDocNode Args) {
auto &Func = MF.getFunction();
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
+ const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
unsigned HiddenArgNumBytes = ST.getImplicitArgNumBytes(Func);
if (!HiddenArgNumBytes)
@@ -822,7 +823,7 @@ void MetadataStreamerV3::emitHiddenKernelArgs(const MachineFunction &MF,
if (M->getNamedMetadata("llvm.printf.fmts"))
emitKernelArg(DL, Int8PtrTy, Align(8), "hidden_printf_buffer", Offset,
Args);
- else if (M->getModuleFlag("amdgpu_hostcall")) {
+ else if (MFI.hasHostcallPtr()) {
// The printf runtime binding pass should have ensured that hostcall and
// printf are not used in the same module.
assert(!M->getNamedMetadata("llvm.printf.fmts"));
@@ -973,6 +974,7 @@ void MetadataStreamerV5::emitHiddenKernelArgs(const MachineFunction &MF,
const GCNSubtarget &ST = MF.getSubtarget<GCNSubtarget>();
const Module *M = Func.getParent();
auto &DL = M->getDataLayout();
+ const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
auto Int64Ty = Type::getInt64Ty(Func.getContext());
auto Int32Ty = Type::getInt32Ty(Func.getContext());
@@ -1011,7 +1013,7 @@ void MetadataStreamerV5::emitHiddenKernelArgs(const MachineFunction &MF,
} else
Offset += 8; // Skipped.
- if (M->getModuleFlag("amdgpu_hostcall")) {
+ if (MFI.hasHostcallPtr()) {
emitKernelArg(DL, Int8PtrTy, Align(8), "hidden_hostcall_buffer", Offset,
Args);
} else
@@ -1041,7 +1043,6 @@ void MetadataStreamerV5::emitHiddenKernelArgs(const MachineFunction &MF,
} else
Offset += 8; // Skipped.
- const SIMachineFunctionInfo &MFI = *MF.getInfo<SIMachineFunctionInfo>();
if (MFI.hasQueuePtr())
emitKernelArg(DL, Int8PtrTy, Align(8), "hidden_queue_ptr", Offset, Args);
}
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
index 23d970f6d1bf..0fbdb0d33b74 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.cpp
@@ -4486,6 +4486,8 @@ const char* AMDGPUTargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(CONST_DATA_PTR)
NODE_NAME_CASE(PC_ADD_REL_OFFSET)
NODE_NAME_CASE(LDS)
+ NODE_NAME_CASE(FPTRUNC_ROUND_UPWARD)
+ NODE_NAME_CASE(FPTRUNC_ROUND_DOWNWARD)
NODE_NAME_CASE(DUMMY_CHAIN)
case AMDGPUISD::FIRST_MEM_OPCODE_NUMBER: break;
NODE_NAME_CASE(LOAD_D16_HI)
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
index b41506157b68..cfd91426270f 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPUISelLowering.h
@@ -483,6 +483,9 @@ enum NodeType : unsigned {
CONST_DATA_PTR,
PC_ADD_REL_OFFSET,
LDS,
+ FPTRUNC_ROUND_UPWARD,
+ FPTRUNC_ROUND_DOWNWARD,
+
DUMMY_CHAIN,
FIRST_MEM_OPCODE_NUMBER = ISD::FIRST_TARGET_MEMORY_OPCODE,
LOAD_D16_HI,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
index ef2b72252ea7..a1a69030df8d 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.cpp
@@ -839,6 +839,11 @@ AMDGPULegalizerInfo::AMDGPULegalizerInfo(const GCNSubtarget &ST_,
.scalarize(0)
.lower();
+ getActionDefinitionsBuilder(G_INTRINSIC_FPTRUNC_ROUND)
+ .customFor({S16, S32})
+ .scalarize(0)
+ .lower();
+
// Lower roundeven into G_FRINT
getActionDefinitionsBuilder({G_INTRINSIC_ROUND, G_INTRINSIC_ROUNDEVEN})
.scalarize(0)
@@ -1759,6 +1764,8 @@ bool AMDGPULegalizerInfo::legalizeCustom(LegalizerHelper &Helper,
case TargetOpcode::G_CTLZ:
case TargetOpcode::G_CTTZ:
return legalizeCTLZ_CTTZ(MI, MRI, B);
+ case TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND:
+ return legalizeFPTruncRound(MI, B);
default:
return false;
}
@@ -4963,6 +4970,27 @@ static bool replaceWithConstant(MachineIRBuilder &B, MachineInstr &MI, int64_t C
return true;
}
+bool AMDGPULegalizerInfo::legalizeFPTruncRound(MachineInstr &MI,
+ MachineIRBuilder &B) const {
+ unsigned Opc;
+ int RoundMode = MI.getOperand(2).getImm();
+
+ if (RoundMode == (int)RoundingMode::TowardPositive)
+ Opc = AMDGPU::G_FPTRUNC_ROUND_UPWARD;
+ else if (RoundMode == (int)RoundingMode::TowardNegative)
+ Opc = AMDGPU::G_FPTRUNC_ROUND_DOWNWARD;
+ else
+ return false;
+
+ B.buildInstr(Opc)
+ .addDef(MI.getOperand(0).getReg())
+ .addUse(MI.getOperand(1).getReg());
+
+ MI.eraseFromParent();
+
+ return true;
+}
+
bool AMDGPULegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
MachineInstr &MI) const {
MachineIRBuilder &B = Helper.MIRBuilder;
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
index 964a41d3d740..291e95dcfb89 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
+++ b/llvm/lib/Target/AMDGPU/AMDGPULegalizerInfo.h
@@ -169,6 +169,8 @@ public:
bool legalizeBVHIntrinsic(MachineInstr &MI, MachineIRBuilder &B) const;
+ bool legalizeFPTruncRound(MachineInstr &MI, MachineIRBuilder &B) const;
+
bool legalizeImageIntrinsic(
MachineInstr &MI, MachineIRBuilder &B,
GISelChangeObserver &Observer,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
index 6e2b5dc471bc..d8133ca052bf 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPULowerModuleLDSPass.cpp
@@ -28,7 +28,7 @@
#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
-#include "Utils/AMDGPULDSUtils.h"
+#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
diff --git a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
index 56693805cc36..f2b5beaa4079 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPURegisterBankInfo.cpp
@@ -4570,6 +4570,9 @@ AMDGPURegisterBankInfo::getInstrMapping(const MachineInstr &MI) const {
OpdsMapping[0] = AMDGPU::getValueMapping(Bank, 1);
break;
}
+ case AMDGPU::G_FPTRUNC_ROUND_UPWARD:
+ case AMDGPU::G_FPTRUNC_ROUND_DOWNWARD:
+ return getDefaultMappingVOP(MI);
}
return getInstructionMapping(/*ID*/1, /*Cost*/1,
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
index 2475b44b42a3..eb9ed61e695e 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUReplaceLDSUseWithPointer.cpp
@@ -83,7 +83,7 @@
#include "AMDGPU.h"
#include "GCNSubtarget.h"
#include "Utils/AMDGPUBaseInfo.h"
-#include "Utils/AMDGPULDSUtils.h"
+#include "Utils/AMDGPUMemoryUtils.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetOperations.h"
diff --git a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
index e82f9232b114..f01143c5d7d3 100644
--- a/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
+++ b/llvm/lib/Target/AMDGPU/AMDGPUSubtarget.cpp
@@ -50,11 +50,6 @@ static cl::opt<bool> EnableVGPRIndexMode(
cl::desc("Use GPR indexing mode instead of movrel for vector indexing"),
cl::init(false));
-static cl::opt<bool> EnableFlatScratch(
- "amdgpu-enable-flat-scratch",
- cl::desc("Use flat scratch instructions"),
- cl::init(false));
-
static cl::opt<bool> UseAA("amdgpu-use-aa-in-codegen",
cl::desc("Enable the use of AA during codegen."),
cl::init(true));
@@ -277,6 +272,7 @@ GCNSubtarget::GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
FlatScratchInsts(false),
ScalarFlatScratchInsts(false),
HasArchitectedFlatScratch(false),
+ EnableFlatScratch(false),
AddNoCarryInsts(false),
HasUnpackedD16VMem(false),
LDSMisalignedBug(false),
@@ -314,11 +310,6 @@ GCNSubtarget::GCNSubtarget(const Triple &TT, StringRef GPU, StringRef FS,
*this, *static_cast<AMDGPURegisterBankInfo *>(RegBankInfo.get()), TM));
}
-bool GCNSubtarget::enableFlatScratch() const {
- return flatScratchIsArchitected() ||
- (EnableFlatScratch && hasFlatScratchInsts());
-}
-
unsigned GCNSubtarget::getConstantBusLimit(unsigned Opcode) const {
if (getGeneration() < GFX10)
return 1;
diff --git a/llvm/lib/Target/AMDGPU/GCNSubtarget.h b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
index f34e1051da80..d60960f9d98c 100644
--- a/llvm/lib/Target/AMDGPU/GCNSubtarget.h
+++ b/llvm/lib/Target/AMDGPU/GCNSubtarget.h
@@ -161,6 +161,7 @@ protected:
bool FlatScratchInsts;
bool ScalarFlatScratchInsts;
bool HasArchitectedFlatScratch;
+ bool EnableFlatScratch;
bool AddNoCarryInsts;
bool HasUnpackedD16VMem;
bool LDSMisalignedBug;
@@ -565,6 +566,11 @@ public:
return ScalarFlatScratchInsts;
}
+ bool enableFlatScratch() const {
+ return flatScratchIsArchitected() ||
+ (EnableFlatScratch && hasFlatScratchInsts());
+ }
+
bool hasGlobalAddTidInsts() const {
return GFX10_BEncoding;
}
@@ -765,8 +771,6 @@ public:
return true;
}
- bool enableFlatScratch() const;
-
void overrideSchedPolicy(MachineSchedPolicy &Policy,
unsigned NumRegionInstrs) const override;
diff --git a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
index dc89a3f2554b..ea73fea467c8 100644
--- a/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
+++ b/llvm/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -17,6 +17,7 @@
#include "AMDGPUTargetMachine.h"
#include "SIMachineFunctionInfo.h"
#include "SIRegisterInfo.h"
+#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
@@ -602,6 +603,7 @@ SITargetLowering::SITargetLowering(const TargetMachine &TM,
setOperationAction(ISD::SINT_TO_FP, MVT::f16, Promote);
setOperationAction(ISD::UINT_TO_FP, MVT::f16, Promote);
setOperationAction(ISD::FROUND, MVT::f16, Custom);
+ setOperationAction(ISD::FPTRUNC_ROUND, MVT::f16, Custom);
// F16 - VOP2 Actions.
setOperationAction(ISD::BR_CC, MVT::f16, Expand);
@@ -4740,6 +4742,24 @@ SDValue SITargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
return lowerBUILD_VECTOR(Op, DAG);
case ISD::FP_ROUND:
return lowerFP_ROUND(Op, DAG);
+ case ISD::FPTRUNC_ROUND: {
+ unsigned Opc;
+ SDLoc DL(Op);
+
+ if (Op.getOperand(0)->getValueType(0) != MVT::f32)
+ return SDValue();
+
+ // Get the rounding mode from the last operand
+ int RoundMode = cast<ConstantSDNode>(Op.getOperand(1))->getZExtValue();
+ if (RoundMode == (int)RoundingMode::TowardPositive)
+ Opc = AMDGPUISD::FPTRUNC_ROUND_UPWARD;
+ else if (RoundMode == (int)RoundingMode::TowardNegative)
+ Opc = AMDGPUISD::FPTRUNC_ROUND_DOWNWARD;
+ else
+ return SDValue();
+
+ return DAG.getNode(Opc, DL, Op.getNode()->getVTList(), Op->getOperand(0));
+ }
case ISD::TRAP:
return lowerTRAP(Op, DAG);
case ISD::DEBUGTRAP:
diff --git a/llvm/lib/Target/AMDGPU/SIInstrInfo.td b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
index e738d92446d1..73544048e79c 100644
--- a/llvm/lib/Target/AMDGPU/SIInstrInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIInstrInfo.td
@@ -255,6 +255,14 @@ def SIdenorm_mode : SDNode<"AMDGPUISD::DENORM_MODE",
[SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]
>;
+def SIfptrunc_round_upward : SDNode<"AMDGPUISD::FPTRUNC_ROUND_UPWARD",
+ SDTFPRoundOp
+>;
+
+def SIfptrunc_round_downward : SDNode<"AMDGPUISD::FPTRUNC_ROUND_DOWNWARD",
+ SDTFPRoundOp
+>;
+
//===----------------------------------------------------------------------===//
// ValueType helpers
//===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Target/AMDGPU/SIInstructions.td b/llvm/lib/Target/AMDGPU/SIInstructions.td
index 952eecb07459..873b4ff3516e 100644
--- a/llvm/lib/Target/AMDGPU/SIInstructions.td
+++ b/llvm/lib/Target/AMDGPU/SIInstructions.td
@@ -186,6 +186,22 @@ def EXIT_STRICT_WQM : SPseudoInstSI <(outs SReg_1:$sdst), (ins SReg_1:$src0)> {
let mayStore = 0;
}
+// Pseudo instructions used for @llvm.fptrunc.round with upward and
+// downward rounding. The intrinsic is legalized to G_FPTRUNC_ROUND_UPWARD
+// or G_FPTRUNC_ROUND_DOWNWARD, which are then lowered to
+// FPTRUNC_UPWARD_PSEUDO and FPTRUNC_DOWNWARD_PSEUDO respectively.
+// The final codegen is done in the ModeRegister pass.
+let Uses = [MODE, EXEC] in {
+def FPTRUNC_UPWARD_PSEUDO : VPseudoInstSI <(outs VGPR_32:$vdst),
+ (ins VGPR_32:$src0),
+ [(set f16:$vdst, (SIfptrunc_round_upward f32:$src0))]>;
+
+def FPTRUNC_DOWNWARD_PSEUDO : VPseudoInstSI <(outs VGPR_32:$vdst),
+ (ins VGPR_32:$src0),
+ [(set f16:$vdst, (SIfptrunc_round_downward f32:$src0))]>;
+} // End Uses = [MODE, EXEC]
+
// Invert the exec mask and overwrite the inactive lanes of dst with inactive,
// restoring it after we're done.
let Defs = [SCC] in {
@@ -3183,3 +3199,15 @@ def G_SI_CALL : AMDGPUGenericInstruction {
// TODO: Should really base this on the call target
let isConvergent = 1;
}
+
+def G_FPTRUNC_ROUND_UPWARD : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$vdst);
+ let InOperandList = (ins type1:$src0);
+ let hasSideEffects = 0;
+}
+
+def G_FPTRUNC_ROUND_DOWNWARD : AMDGPUGenericInstruction {
+ let OutOperandList = (outs type0:$vdst);
+ let InOperandList = (ins type1:$src0);
+ let hasSideEffects = 0;
+}
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
index 0d89ba1ac168..1bb17a549cbf 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.cpp
@@ -47,6 +47,7 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
WorkItemIDZ(false),
ImplicitBufferPtr(false),
ImplicitArgPtr(false),
+ HostcallPtr(false),
GITPtrHigh(0xffffffff),
HighBitsOf32BitAddress(0),
GDSSize(0) {
@@ -141,6 +142,9 @@ SIMachineFunctionInfo::SIMachineFunctionInfo(const MachineFunction &MF)
if (!F.hasFnAttribute("amdgpu-no-dispatch-id"))
DispatchID = true;
+
+ if (!F.hasFnAttribute("amdgpu-no-hostcall-ptr"))
+ HostcallPtr = true;
}
// FIXME: This attribute is a hack, we just need an analysis on the function
diff --git a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
index e0c5d30be37b..38561f209759 100644
--- a/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
+++ b/llvm/lib/Target/AMDGPU/SIMachineFunctionInfo.h
@@ -421,6 +421,7 @@ private:
// Pointer to where the ABI inserts special kernel arguments separate from the
// user arguments. This is an offset from the KernargSegmentPtr.
bool ImplicitArgPtr : 1;
+ bool HostcallPtr : 1;
bool MayNeedAGPRs : 1;
@@ -696,6 +697,10 @@ public:
return ImplicitArgPtr;
}
+ bool hasHostcallPtr() const {
+ return HostcallPtr;
+ }
+
bool hasImplicitBufferPtr() const {
return ImplicitBufferPtr;
}
diff --git a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
index 24a8879b5684..1f572eedb413 100644
--- a/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
+++ b/llvm/lib/Target/AMDGPU/SIModeRegister.cpp
@@ -162,7 +162,9 @@ FunctionPass *llvm::createSIModeRegisterPass() { return new SIModeRegister(); }
// double precision setting.
Status SIModeRegister::getInstructionMode(MachineInstr &MI,
const SIInstrInfo *TII) {
- if (TII->usesFPDPRounding(MI)) {
+ if (TII->usesFPDPRounding(MI) ||
+ MI.getOpcode() == AMDGPU::FPTRUNC_UPWARD_PSEUDO ||
+ MI.getOpcode() == AMDGPU::FPTRUNC_DOWNWARD_PSEUDO) {
switch (MI.getOpcode()) {
case AMDGPU::V_INTERP_P1LL_F16:
case AMDGPU::V_INTERP_P1LV_F16:
@@ -170,6 +172,18 @@ Status SIModeRegister::getInstructionMode(MachineInstr &MI,
// f16 interpolation instructions need double precision round to zero
return Status(FP_ROUND_MODE_DP(3),
FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_ZERO));
+ case AMDGPU::FPTRUNC_UPWARD_PSEUDO: {
+ // Replace the pseudo with the real conversion instruction
+ MI.setDesc(TII->get(AMDGPU::V_CVT_F16_F32_e32));
+ return Status(FP_ROUND_MODE_DP(3),
+ FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_INF));
+ }
+ case AMDGPU::FPTRUNC_DOWNWARD_PSEUDO: {
+ // Replace the pseudo with the real conversion instruction
+ MI.setDesc(TII->get(AMDGPU::V_CVT_F16_F32_e32));
+ return Status(FP_ROUND_MODE_DP(3),
+ FP_ROUND_MODE_DP(FP_ROUND_ROUND_TO_NEGINF));
+ }
default:
return DefaultStatus;
}
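The two pseudo cases above are where @llvm.fptrunc.round finally becomes real code: the pseudo is rewritten to V_CVT_F16_F32_e32 and the returned Status programs the MODE register with round-toward-plus-infinity or round-toward-minus-infinity for that conversion. As a host-side analogy (not part of the patch), the sketch below uses double-to-float as a stand-in for the f32-to-f16 truncation:

// Analogy only: a narrowing FP conversion under explicit upward/downward
// rounding. Assumes the conversion happens at run time (hence volatile) and
// that the host honours fesetround for it, as common hardware does.
#include <cassert>
#include <cfenv>

int main() {
  volatile double X = 0.1;                  // not exactly representable in float
  std::fesetround(FE_UPWARD);
  volatile float Up = static_cast<float>(X);
  std::fesetround(FE_DOWNWARD);
  volatile float Down = static_cast<float>(X);
  std::fesetround(FE_TONEAREST);
  assert(Down < Up);                        // the directed results bracket X
  assert(static_cast<double>(Down) < X && X < static_cast<double>(Up));
  return 0;
}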
diff --git a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
index eb9452f4b85e..71b8b779ba76 100644
--- a/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
+++ b/llvm/lib/Target/AMDGPU/SIRegisterInfo.td
@@ -856,21 +856,12 @@ defm AReg_1024 : ARegClass<32, [v32i32, v32f32, v16i64, v16f64], (add AGPR_1024)
} // End GeneratePressureSet = 0
-// This is not a real register. This is just to have a register to add
-// to VReg_1 that does not alias any real register that would
-// introduce inferred register classes.
-def ARTIFICIAL_VGPR : SIReg <"invalid vgpr", 0> {
- let isArtificial = 1;
-}
-
let GeneratePressureSet = 0 in {
-// FIXME: Should specify an empty set for this. No register should
-// ever be allocated using VReg_1. This is a hack for SelectionDAG
-// that should always be lowered by SILowerI1Copies. TableGen crashes
-// on an empty register set, but also sorts register classes based on
-// the number of registerss in them. Add only one register so this is
+// No register should ever be allocated using VReg_1. This is a hack for
+// SelectionDAG that should always be lowered by SILowerI1Copies. TableGen
+// sorts register classes based on the number of registers in them so this is
// sorted to the end and not preferred over VGPR_32.
-def VReg_1 : SIRegisterClass<"AMDGPU", [i1], 32, (add ARTIFICIAL_VGPR)> {
+def VReg_1 : SIRegisterClass<"AMDGPU", [i1], 32, (add)> {
let Size = 1;
let HasVGPR = 1;
}
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
index 683be871ff82..7c4780c5062a 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.cpp
@@ -136,6 +136,22 @@ bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI) {
isHsaAbiVersion5(STI);
}
+// FIXME: All such magic numbers about the ABI should be in a
+// central TD file.
+unsigned getHostcallImplicitArgPosition() {
+ switch (AmdhsaCodeObjectVersion) {
+ case 2:
+ case 3:
+ case 4:
+ return 24;
+ case 5:
+ return 80;
+ default:
+ llvm_unreachable("Unexpected code object version");
+ return 0;
+ }
+}
+
#define GET_MIMGBaseOpcodesTable_IMPL
#define GET_MIMGDimInfoTable_IMPL
#define GET_MIMGInfoTable_IMPL
@@ -1427,6 +1443,10 @@ bool isModuleEntryFunctionCC(CallingConv::ID CC) {
}
}
+bool isKernelCC(const Function *Func) {
+ return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
+}
+
bool hasXNACK(const MCSubtargetInfo &STI) {
return STI.getFeatureBits()[AMDGPU::FeatureXNACK];
}
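A note on the magic numbers in getHostcallImplicitArgPosition above: the attributor change earlier in this patch (funcRetrievesHostcallPtr) only needs to decide whether an access into the implicit kernarg area can overlap the 8-byte hostcall slot at that position. A small standalone illustration of that overlap test, using just the offsets hard-coded above (byte 24 for code object v2-v4, byte 80 for v5); the helper names here are made up for the example:

// Illustration only: does an access [Offset, Offset+Size) into the implicit
// kernarg area overlap the 8-byte hostcall pointer slot?
#include <cassert>
#include <cstdint>

static uint64_t hostcallSlotOffset(unsigned CodeObjectVersion) {
  // Mirrors the switch in getHostcallImplicitArgPosition above.
  return CodeObjectVersion == 5 ? 80 : 24;
}

static bool mayTouchHostcallPtr(uint64_t Offset, uint64_t Size,
                                unsigned CodeObjectVersion) {
  uint64_t SlotBegin = hostcallSlotOffset(CodeObjectVersion);
  uint64_t SlotEnd = SlotBegin + 8;        // the slot holds a 64-bit pointer
  return Offset < SlotEnd && Offset + Size > SlotBegin;
}

int main() {
  assert(!mayTouchHostcallPtr(0, 24, 4));  // stops just before the v4 slot
  assert(mayTouchHostcallPtr(24, 8, 4));   // exactly the v4 slot
  assert(!mayTouchHostcallPtr(24, 8, 5));  // v5 moved the slot to byte 80
  return 0;
}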
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
index be19b6082d4a..fc8e26313d0e 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUBaseInfo.h
@@ -54,6 +54,9 @@ bool isHsaAbiVersion5(const MCSubtargetInfo *STI);
/// false otherwise.
bool isHsaAbiVersion3AndAbove(const MCSubtargetInfo *STI);
+/// \returns The offset of the hostcall pointer argument from implicitarg_ptr
+unsigned getHostcallImplicitArgPosition();
+
struct GcnBufferFormatInfo {
unsigned Format;
unsigned BitsPerComp;
@@ -738,6 +741,8 @@ bool isEntryFunctionCC(CallingConv::ID CC);
LLVM_READNONE
bool isModuleEntryFunctionCC(CallingConv::ID CC);
+bool isKernelCC(const Function *Func);
+
// FIXME: Remove this when calling conventions cleaned up
LLVM_READNONE
inline bool isKernel(CallingConv::ID CC) {
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.cpp
deleted file mode 100644
index a83ff6667956..000000000000
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-//===- AMDGPULDSUtils.cpp -------------------------------------------------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// AMDGPU LDS related helper utility functions.
-//
-//===----------------------------------------------------------------------===//
-
-#include "AMDGPULDSUtils.h"
-#include "AMDGPU.h"
-#include "Utils/AMDGPUBaseInfo.h"
-#include "llvm/ADT/DepthFirstIterator.h"
-#include "llvm/ADT/SetVector.h"
-#include "llvm/IR/Constants.h"
-#include "llvm/IR/ReplaceConstant.h"
-
-using namespace llvm;
-
-namespace llvm {
-
-namespace AMDGPU {
-
-bool isKernelCC(const Function *Func) {
- return AMDGPU::isModuleEntryFunctionCC(Func->getCallingConv());
-}
-
-Align getAlign(DataLayout const &DL, const GlobalVariable *GV) {
- return DL.getValueOrABITypeAlignment(GV->getPointerAlignment(DL),
- GV->getValueType());
-}
-
-static void collectFunctionUses(User *U, const Function *F,
- SetVector<Instruction *> &InstUsers) {
- SmallVector<User *> Stack{U};
-
- while (!Stack.empty()) {
- U = Stack.pop_back_val();
-
- if (auto *I = dyn_cast<Instruction>(U)) {
- if (I->getFunction() == F)
- InstUsers.insert(I);
- continue;
- }
-
- if (!isa<ConstantExpr>(U))
- continue;
-
- append_range(Stack, U->users());
- }
-}
-
-void replaceConstantUsesInFunction(ConstantExpr *C, const Function *F) {
- SetVector<Instruction *> InstUsers;
-
- collectFunctionUses(C, F, InstUsers);
- for (Instruction *I : InstUsers) {
- convertConstantExprsToInstructions(I, C);
- }
-}
-
-static bool shouldLowerLDSToStruct(const GlobalVariable &GV,
- const Function *F) {
- // We are not interested in kernel LDS lowering for module LDS itself.
- if (F && GV.getName() == "llvm.amdgcn.module.lds")
- return false;
-
- bool Ret = false;
- SmallPtrSet<const User *, 8> Visited;
- SmallVector<const User *, 16> Stack(GV.users());
-
- assert(!F || isKernelCC(F));
-
- while (!Stack.empty()) {
- const User *V = Stack.pop_back_val();
- Visited.insert(V);
-
- if (isa<GlobalValue>(V)) {
- // This use of the LDS variable is the initializer of a global variable.
- // This is ill formed. The address of an LDS variable is kernel dependent
- // and unknown until runtime. It can't be written to a global variable.
- continue;
- }
-
- if (auto *I = dyn_cast<Instruction>(V)) {
- const Function *UF = I->getFunction();
- if (UF == F) {
- // Used from this kernel, we want to put it into the structure.
- Ret = true;
- } else if (!F) {
- // For module LDS lowering, lowering is required if the user instruction
- // is from non-kernel function.
- Ret |= !isKernelCC(UF);
- }
- continue;
- }
-
- // User V should be a constant, recursively visit users of V.
- assert(isa<Constant>(V) && "Expected a constant.");
- append_range(Stack, V->users());
- }
-
- return Ret;
-}
-
-std::vector<GlobalVariable *> findVariablesToLower(Module &M,
- const Function *F) {
- std::vector<llvm::GlobalVariable *> LocalVars;
- for (auto &GV : M.globals()) {
- if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
- continue;
- }
- if (!GV.hasInitializer()) {
- // addrspace(3) without initializer implies cuda/hip extern __shared__
- // the semantics for such a variable appears to be that all extern
- // __shared__ variables alias one another, in which case this transform
- // is not required
- continue;
- }
- if (!isa<UndefValue>(GV.getInitializer())) {
- // Initializers are unimplemented for LDS address space.
- // Leave such variables in place for consistent error reporting.
- continue;
- }
- if (GV.isConstant()) {
- // A constant undef variable can't be written to, and any load is
- // undef, so it should be eliminated by the optimizer. It could be
- // dropped by the back end if not. This pass skips over it.
- continue;
- }
- if (!shouldLowerLDSToStruct(GV, F)) {
- continue;
- }
- LocalVars.push_back(&GV);
- }
- return LocalVars;
-}
-
-} // end namespace AMDGPU
-
-} // end namespace llvm
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h
deleted file mode 100644
index 83ef68cc3f60..000000000000
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPULDSUtils.h
+++ /dev/null
@@ -1,38 +0,0 @@
-//===- AMDGPULDSUtils.h - LDS related helper functions -*- C++ -*----------===//
-//
-// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
-// See https://llvm.org/LICENSE.txt for license information.
-// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-//
-//===----------------------------------------------------------------------===//
-//
-// AMDGPU LDS related helper utility functions.
-//
-//===----------------------------------------------------------------------===//
-
-#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPULDSUTILS_H
-#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPULDSUTILS_H
-
-#include "llvm/ADT/DenseMap.h"
-#include "llvm/IR/Constants.h"
-
-namespace llvm {
-
-class ConstantExpr;
-
-namespace AMDGPU {
-
-bool isKernelCC(const Function *Func);
-
-Align getAlign(DataLayout const &DL, const GlobalVariable *GV);
-
-std::vector<GlobalVariable *> findVariablesToLower(Module &M,
- const Function *F = nullptr);
-
-/// Replace all uses of constant \p C with instructions in \p F.
-void replaceConstantUsesInFunction(ConstantExpr *C, const Function *F);
-} // end namespace AMDGPU
-
-} // end namespace llvm
-
-#endif // LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPULDSUTILS_H
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.cpp b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.cpp
index d3848c3cb487..f95321240422 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.cpp
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.cpp
@@ -8,12 +8,16 @@
#include "AMDGPUMemoryUtils.h"
#include "AMDGPU.h"
+#include "AMDGPUBaseInfo.h"
+#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
+#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Instructions.h"
-#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicInst.h"
+#include "llvm/IR/IntrinsicsAMDGPU.h"
+#include "llvm/IR/ReplaceConstant.h"
#define DEBUG_TYPE "amdgpu-memory-utils"
@@ -23,6 +27,117 @@ namespace llvm {
namespace AMDGPU {
+Align getAlign(DataLayout const &DL, const GlobalVariable *GV) {
+ return DL.getValueOrABITypeAlignment(GV->getPointerAlignment(DL),
+ GV->getValueType());
+}
+
+static void collectFunctionUses(User *U, const Function *F,
+ SetVector<Instruction *> &InstUsers) {
+ SmallVector<User *> Stack{U};
+
+ while (!Stack.empty()) {
+ U = Stack.pop_back_val();
+
+ if (auto *I = dyn_cast<Instruction>(U)) {
+ if (I->getFunction() == F)
+ InstUsers.insert(I);
+ continue;
+ }
+
+ if (!isa<ConstantExpr>(U))
+ continue;
+
+ append_range(Stack, U->users());
+ }
+}
+
+void replaceConstantUsesInFunction(ConstantExpr *C, const Function *F) {
+ SetVector<Instruction *> InstUsers;
+
+ collectFunctionUses(C, F, InstUsers);
+ for (Instruction *I : InstUsers) {
+ convertConstantExprsToInstructions(I, C);
+ }
+}
+
+static bool shouldLowerLDSToStruct(const GlobalVariable &GV,
+ const Function *F) {
+ // We are not interested in kernel LDS lowering for module LDS itself.
+ if (F && GV.getName() == "llvm.amdgcn.module.lds")
+ return false;
+
+ bool Ret = false;
+ SmallPtrSet<const User *, 8> Visited;
+ SmallVector<const User *, 16> Stack(GV.users());
+
+ assert(!F || isKernelCC(F));
+
+ while (!Stack.empty()) {
+ const User *V = Stack.pop_back_val();
+ Visited.insert(V);
+
+ if (isa<GlobalValue>(V)) {
+ // This use of the LDS variable is the initializer of a global variable.
+ // This is ill formed. The address of an LDS variable is kernel dependent
+ // and unknown until runtime. It can't be written to a global variable.
+ continue;
+ }
+
+ if (auto *I = dyn_cast<Instruction>(V)) {
+ const Function *UF = I->getFunction();
+ if (UF == F) {
+ // Used from this kernel, we want to put it into the structure.
+ Ret = true;
+ } else if (!F) {
+ // For module LDS lowering, lowering is required if the user instruction
+ // is from non-kernel function.
+ Ret |= !isKernelCC(UF);
+ }
+ continue;
+ }
+
+ // User V should be a constant, recursively visit users of V.
+ assert(isa<Constant>(V) && "Expected a constant.");
+ append_range(Stack, V->users());
+ }
+
+ return Ret;
+}
+
+std::vector<GlobalVariable *> findVariablesToLower(Module &M,
+ const Function *F) {
+ std::vector<llvm::GlobalVariable *> LocalVars;
+ for (auto &GV : M.globals()) {
+ if (GV.getType()->getPointerAddressSpace() != AMDGPUAS::LOCAL_ADDRESS) {
+ continue;
+ }
+ if (!GV.hasInitializer()) {
+ // addrspace(3) without initializer implies cuda/hip extern __shared__
+ // the semantics for such a variable appears to be that all extern
+ // __shared__ variables alias one another, in which case this transform
+ // is not required
+ continue;
+ }
+ if (!isa<UndefValue>(GV.getInitializer())) {
+ // Initializers are unimplemented for LDS address space.
+ // Leave such variables in place for consistent error reporting.
+ continue;
+ }
+ if (GV.isConstant()) {
+ // A constant undef variable can't be written to, and any load is
+ // undef, so it should be eliminated by the optimizer. It could be
+ // dropped by the back end if not. This pass skips over it.
+ continue;
+ }
+ if (!shouldLowerLDSToStruct(GV, F)) {
+ continue;
+ }
+ LocalVars.push_back(&GV);
+ }
+ return LocalVars;
+}
+
bool isReallyAClobber(const Value *Ptr, MemoryDef *Def, AAResults *AA) {
Instruction *DefInst = Def->getMemoryInst();
diff --git a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
index 97fcbfc8347d..292500a8b77e 100644
--- a/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
+++ b/llvm/lib/Target/AMDGPU/Utils/AMDGPUMemoryUtils.h
@@ -9,16 +9,32 @@
#ifndef LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUMEMORYUTILS_H
#define LLVM_LIB_TARGET_AMDGPU_UTILS_AMDGPUMEMORYUTILS_H
+#include <vector>
+
namespace llvm {
+struct Align;
class AAResults;
+class ConstantExpr;
+class DataLayout;
+class Function;
+class GlobalVariable;
class LoadInst;
class MemoryDef;
class MemorySSA;
+class Module;
class Value;
namespace AMDGPU {
+Align getAlign(DataLayout const &DL, const GlobalVariable *GV);
+
+std::vector<GlobalVariable *> findVariablesToLower(Module &M,
+ const Function *F = nullptr);
+
+/// Replace all uses of constant \p C with instructions in \p F.
+void replaceConstantUsesInFunction(ConstantExpr *C, const Function *F);
+
/// Given a \p Def clobbering a load from \p Ptr according to the MSSA, check
/// if this is actually a memory update or an artificial clobber to facilitate
/// ordering constraints.
diff --git a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
index 5db3da99e18d..99797b17d03d 100644
--- a/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
+++ b/llvm/lib/Target/AMDGPU/Utils/CMakeLists.txt
@@ -1,7 +1,6 @@
add_llvm_component_library(LLVMAMDGPUUtils
AMDGPUAsmUtils.cpp
AMDGPUBaseInfo.cpp
- AMDGPULDSUtils.cpp
AMDGPUMemoryUtils.cpp
AMDGPUPALMetadata.cpp
AMDKernelCodeTUtils.cpp
diff --git a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
index 0a3dff057ccd..bdd46cfe7152 100644
--- a/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
+++ b/llvm/lib/Target/Hexagon/HexagonPatternsHVX.td
@@ -738,9 +738,14 @@ let Predicates = [UseHVX] in {
def V2Q: OutPatFrag<(ops node:$Vs), (V6_vandvrt $Vs, (A2_tfrsi -1))>;
-let Predicates = [UseHVX] in
- def: Pat<(select I1:$Pu, VecI1:$Qs, VecI1:$Qt),
+let Predicates = [UseHVX] in {
+ def: Pat<(select I1:$Pu, VecQ8:$Qs, VecQ8:$Qt),
+ (V2Q (PS_vselect $Pu, (Q2V $Qs), (Q2V $Qt)))>;
+ def: Pat<(select I1:$Pu, VecQ16:$Qs, VecQ16:$Qt),
(V2Q (PS_vselect $Pu, (Q2V $Qs), (Q2V $Qt)))>;
+ def: Pat<(select I1:$Pu, VecQ32:$Qs, VecQ32:$Qt),
+ (V2Q (PS_vselect $Pu, (Q2V $Qs), (Q2V $Qt)))>;
+}
let Predicates = [UseHVX] in {
def: Pat<(VecQ8 (qtrue)), (PS_qtrue)>;
diff --git a/llvm/lib/Target/M68k/CMakeLists.txt b/llvm/lib/Target/M68k/CMakeLists.txt
index e95126e4a8ee..f4e3a5758432 100644
--- a/llvm/lib/Target/M68k/CMakeLists.txt
+++ b/llvm/lib/Target/M68k/CMakeLists.txt
@@ -8,6 +8,7 @@ tablegen(LLVM M68kGenRegisterBank.inc -gen-register-bank)
tablegen(LLVM M68kGenInstrInfo.inc -gen-instr-info)
tablegen(LLVM M68kGenSubtargetInfo.inc -gen-subtarget)
tablegen(LLVM M68kGenMCCodeBeads.inc -gen-code-beads)
+tablegen(LLVM M68kGenMCCodeEmitter.inc -gen-emitter)
tablegen(LLVM M68kGenMCPseudoLowering.inc -gen-pseudo-lowering)
tablegen(LLVM M68kGenDAGISel.inc -gen-dag-isel)
tablegen(LLVM M68kGenCallingConv.inc -gen-callingconv)
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.cpp b/llvm/lib/Target/M68k/M68kISelLowering.cpp
index dba190a2ebc0..a190057840cd 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.cpp
+++ b/llvm/lib/Target/M68k/M68kISelLowering.cpp
@@ -101,6 +101,9 @@ M68kTargetLowering::M68kTargetLowering(const M68kTargetMachine &TM,
setOperationAction(OP, MVT::i32, Expand);
}
+ for (auto OP : {ISD::SHL_PARTS, ISD::SRA_PARTS, ISD::SRL_PARTS})
+ setOperationAction(OP, MVT::i32, Custom);
+
// Add/Sub overflow ops with MVT::Glues are lowered to CCR dependences.
for (auto VT : {MVT::i8, MVT::i16, MVT::i32}) {
setOperationAction(ISD::ADDC, VT, Custom);
@@ -1354,6 +1357,12 @@ SDValue M68kTargetLowering::LowerOperation(SDValue Op,
return LowerVASTART(Op, DAG);
case ISD::DYNAMIC_STACKALLOC:
return LowerDYNAMIC_STACKALLOC(Op, DAG);
+ case ISD::SHL_PARTS:
+ return LowerShiftLeftParts(Op, DAG);
+ case ISD::SRA_PARTS:
+ return LowerShiftRightParts(Op, DAG, true);
+ case ISD::SRL_PARTS:
+ return LowerShiftRightParts(Op, DAG, false);
}
}
@@ -3239,6 +3248,102 @@ SDValue M68kTargetLowering::LowerDYNAMIC_STACKALLOC(SDValue Op,
return DAG.getMergeValues(Ops, DL);
}
+SDValue M68kTargetLowering::LowerShiftLeftParts(SDValue Op,
+ SelectionDAG &DAG) const {
+ SDLoc DL(Op);
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+ EVT VT = Lo.getValueType();
+
+ // if Shamt - register size < 0: // Shamt < register size
+ // Lo = Lo << Shamt
+ // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (register size - 1 ^ Shamt))
+ // else:
+ // Lo = 0
+ // Hi = Lo << (Shamt - register size)
+
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
+ SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
+ SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
+ SDValue ShamtMinusRegisterSize =
+ DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
+ SDValue RegisterSizeMinus1Shamt =
+ DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
+
+ SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
+ SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
+ SDValue ShiftRightLo =
+ DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, RegisterSizeMinus1Shamt);
+ SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
+ SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
+ SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusRegisterSize);
+
+ SDValue CC =
+ DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
+
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+ return DAG.getMergeValues({Lo, Hi}, DL);
+}
+
+SDValue M68kTargetLowering::LowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
+ bool IsSRA) const {
+ SDLoc DL(Op);
+ SDValue Lo = Op.getOperand(0);
+ SDValue Hi = Op.getOperand(1);
+ SDValue Shamt = Op.getOperand(2);
+ EVT VT = Lo.getValueType();
+
+ // SRA expansion:
+ // if Shamt - register size < 0: // Shamt < register size
+ // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
+ // Hi = Hi >>s Shamt
+ // else:
+ // Lo = Hi >>s (Shamt - register size);
+ // Hi = Hi >>s (register size - 1)
+ //
+ // SRL expansion:
+ // if Shamt - register size < 0: // Shamt < register size
+ // Lo = (Lo >>u Shamt) | ((Hi << 1) << (register size - 1 ^ Shamt))
+ // Hi = Hi >>u Shamt
+ // else:
+ // Lo = Hi >>u (Shamt - register size);
+ // Hi = 0;
+
+ unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
+
+ SDValue Zero = DAG.getConstant(0, DL, VT);
+ SDValue One = DAG.getConstant(1, DL, VT);
+ SDValue MinusRegisterSize = DAG.getConstant(-32, DL, VT);
+ SDValue RegisterSizeMinus1 = DAG.getConstant(32 - 1, DL, VT);
+ SDValue ShamtMinusRegisterSize =
+ DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusRegisterSize);
+ SDValue RegisterSizeMinus1Shamt =
+ DAG.getNode(ISD::XOR, DL, VT, RegisterSizeMinus1, Shamt);
+
+ SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
+ SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
+ SDValue ShiftLeftHi =
+ DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, RegisterSizeMinus1Shamt);
+ SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
+ SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
+ SDValue LoFalse =
+ DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusRegisterSize);
+ SDValue HiFalse =
+ IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, RegisterSizeMinus1) : Zero;
+
+ SDValue CC =
+ DAG.getSetCC(DL, MVT::i8, ShamtMinusRegisterSize, Zero, ISD::SETLT);
+
+ Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
+ Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
+
+ return DAG.getMergeValues({Lo, Hi}, DL);
+}
+
//===----------------------------------------------------------------------===//
// DAG Combine
//===----------------------------------------------------------------------===//
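The expansion implemented by LowerShiftLeftParts above (and sketched in its comment) can be checked in isolation: a 64-bit left shift is built purely from 32-bit operations, with the XOR-against-31 trick avoiding an out-of-range shift amount when Shamt is 0. A self-contained sketch, not taken from the patch:

// Check the SHL_PARTS expansion: (Hi:Lo) << Shamt using only 32-bit ops.
#include <cassert>
#include <cstdint>

static void shlParts32(uint32_t Lo, uint32_t Hi, uint32_t Shamt,
                       uint32_t &OutLo, uint32_t &OutHi) {
  if (Shamt < 32) {
    OutLo = Lo << Shamt;
    // (Lo >> 1) >> (31 ^ Shamt) equals Lo >> (32 - Shamt) but never shifts
    // a 32-bit value by 32, which would be undefined when Shamt == 0.
    OutHi = (Hi << Shamt) | ((Lo >> 1) >> (31 ^ Shamt));
  } else {
    OutLo = 0;
    OutHi = Lo << (Shamt - 32);   // uses the incoming Lo, as in HiFalse above
  }
}

int main() {
  const uint64_t Val = 0x89ABCDEF01234567ULL;
  for (uint32_t S = 0; S < 64; ++S) {
    uint32_t Lo, Hi;
    shlParts32(uint32_t(Val), uint32_t(Val >> 32), S, Lo, Hi);
    assert(((uint64_t(Hi) << 32) | Lo) == (Val << S));
  }
  return 0;
}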
diff --git a/llvm/lib/Target/M68k/M68kISelLowering.h b/llvm/lib/Target/M68k/M68kISelLowering.h
index 9375a99962eb..f759a7d939c8 100644
--- a/llvm/lib/Target/M68k/M68kISelLowering.h
+++ b/llvm/lib/Target/M68k/M68kISelLowering.h
@@ -220,6 +220,8 @@ private:
SDValue LowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
+ SDValue LowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
CallingConv::ID CallConv, bool IsVarArg,
diff --git a/llvm/lib/Target/M68k/M68kInstrArithmetic.td b/llvm/lib/Target/M68k/M68kInstrArithmetic.td
index ef50de576641..4ae9cbd4228f 100644
--- a/llvm/lib/Target/M68k/M68kInstrArithmetic.td
+++ b/llvm/lib/Target/M68k/M68kInstrArithmetic.td
@@ -28,9 +28,34 @@
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
+// OPMODE Encoding
+//===----------------------------------------------------------------------===//
+class MxOpModeEncoding<bits<3> encoding> {
+ bits<3> Value = encoding;
+}
+
+// op EA, Dn
+def MxOpMode8_d_EA : MxOpModeEncoding<0b000>;
+def MxOpMode16_d_EA : MxOpModeEncoding<0b001>;
+def MxOpMode32_d_EA : MxOpModeEncoding<0b010>;
+
+// op Dn, EA
+def MxOpMode8_EA_d : MxOpModeEncoding<0b100>;
+def MxOpMode16_EA_d : MxOpModeEncoding<0b101>;
+def MxOpMode32_EA_d : MxOpModeEncoding<0b110>;
+
+// op EA, An
+def MxOpMode16_a_EA : MxOpModeEncoding<0b011>;
+def MxOpMode32_a_EA : MxOpModeEncoding<0b111>;
+
+
+//===----------------------------------------------------------------------===//
// Encoding
//===----------------------------------------------------------------------===//
+let Defs = [CCR] in {
+let Constraints = "$src = $dst" in {
+
/// Encoding for Normal forms
/// ----------------------------------------------------
/// F E D C | B A 9 | 8 7 6 | 5 4 3 | 2 1 0
@@ -38,23 +63,52 @@
/// | | | EFFECTIVE ADDRESS
/// x x x x | REG | OP MODE | MODE | REG
/// ----------------------------------------------------
-class MxArithEncoding<MxBead4Bits CMD, MxEncOpMode OPMODE, MxBead REG,
- MxEncEA EA, MxEncExt EXT>
- : MxEncoding<EA.Reg, EA.DA, EA.Mode, OPMODE.B0, OPMODE.B1, OPMODE.B2, REG,
- CMD,EXT.Imm, EXT.B8, EXT.Scale, EXT.WL, EXT.DAReg>;
-/// Encoding for Extended forms
-/// ------------------------------------------------------
-/// F E D C | B A 9 | 8 | 7 6 | 5 4 | 3 | 2 1 0
-/// ------------------------------------------------------
-/// x x x x | REG Rx | 1 | SIZE | 0 0 | M | REG Ry
-/// ------------------------------------------------------
-/// Rx - destination
-/// Ry - source
-/// M - address mode switch
-class MxArithXEncoding<MxBead4Bits CMD, MxEncSize SIZE, MxBead1Bit MODE,
- MxBeadDReg SRC, MxBeadDReg DST>
- : MxEncoding<SRC, MODE, MxBead2Bits<0b00>, SIZE, MxBead1Bit<0b1>, DST, CMD>;
+// $reg, $ccr <- $reg op $reg
+class MxBiArOp_R_RR_xEA<string MN, SDNode NODE, MxType DST_TYPE, MxType SRC_TYPE,
+ bits<4> CMD>
+ : MxInst<(outs DST_TYPE.ROp:$dst), (ins DST_TYPE.ROp:$src, SRC_TYPE.ROp:$opd),
+ MN#"."#DST_TYPE.Prefix#"\t$opd, $dst",
+ [(set DST_TYPE.VT:$dst, CCR, (NODE DST_TYPE.VT:$src, SRC_TYPE.VT:$opd))]> {
+ let Inst = (descend
+ CMD, (operand "$dst", 3),
+ !cast<MxOpModeEncoding>("MxOpMode"#DST_TYPE.Size#"_"#DST_TYPE.RLet#"_EA").Value,
+ (descend /*MODE without last bit*/0b00,
+ /*REGISTER prefixed with D/A bit*/(operand "$opd", 4))
+ );
+}
+
+/// This Op is similar to the one above except it uses reversed opmode: some
+/// commands (e.g. eor) do not support the dEA or rEA modes and require EAd for
+/// register-only operations.
+/// NOTE: when using dd commands it seems irrelevant which opmode to use,
+/// but some opcodes support address registers and some do not, which creates
+/// this mess.
+class MxBiArOp_R_RR_EAd<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
+ : MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, TYPE.ROp:$opd),
+ MN#"."#TYPE.Prefix#"\t$opd, $dst",
+ [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.VT:$opd))]> {
+ let Inst = (descend
+ CMD, (operand "$opd", 3),
+ !cast<MxOpModeEncoding>("MxOpMode"#TYPE.Size#"_EA_"#TYPE.RLet).Value,
+ /*Destination can only be a data register*/
+ /*MODE*/0b000,
+ /*REGISTER*/(operand "$dst", 3));
+}
+
+let mayLoad = 1 in
+class MxBiArOp_R_RM<string MN, SDNode NODE, MxType TYPE, MxOperand OPD, ComplexPattern PAT,
+ bits<4> CMD, MxEncMemOp SRC_ENC>
+ : MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, OPD:$opd),
+ MN#"."#TYPE.Prefix#"\t$opd, $dst",
+ [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, (TYPE.Load PAT:$opd)))]> {
+ let Inst = (ascend
+ (descend CMD, (operand "$dst", 3),
+ !cast<MxOpModeEncoding>("MxOpMode"#TYPE.Size#"_"#TYPE.RLet#"_EA").Value,
+ SRC_ENC.EA),
+ SRC_ENC.Supplement
+ );
+}
/// Encoding for Immediate forms
/// ---------------------------------------------------
@@ -69,211 +123,154 @@ class MxArithXEncoding<MxBead4Bits CMD, MxEncSize SIZE, MxBead1Bit MODE,
/// ---------------------------------------------------
/// NOTE: this is used to store an immediate to memory; imm-to-reg is handled
/// with the normal version
-class MxArithImmEncoding<MxBead4Bits CMD, MxEncSize SIZE,
- MxEncEA DST_EA, MxEncExt DST_EXT, MxEncExt SRC_EXT>
- : MxEncoding<DST_EA.Reg, DST_EA.DA, DST_EA.Mode, SIZE, CMD, MxBead4Bits<0>,
- // Source
- SRC_EXT.Imm, SRC_EXT.B8, SRC_EXT.Scale,
- SRC_EXT.WL, SRC_EXT.DAReg,
- // Destination
- DST_EXT.Imm, DST_EXT.B8, DST_EXT.Scale,
- DST_EXT.WL, DST_EXT.DAReg>;
-
-
-//===----------------------------------------------------------------------===//
-// Add/Sub
-//===----------------------------------------------------------------------===//
-
-let Defs = [CCR] in {
-let Constraints = "$src = $dst" in {
-
-// $reg, $ccr <- $reg op $reg
-class MxBiArOp_RFRR_xEA<string MN, SDNode NODE, MxType DST_TYPE, MxType SRC_TYPE,
- bits<4> CMD, MxBead REG>
- : MxInst<(outs DST_TYPE.ROp:$dst), (ins DST_TYPE.ROp:$src, SRC_TYPE.ROp:$opd),
- MN#"."#DST_TYPE.Prefix#"\t$opd, $dst",
- [(set DST_TYPE.VT:$dst, CCR, (NODE DST_TYPE.VT:$src, SRC_TYPE.VT:$opd))],
- MxArithEncoding<MxBead4Bits<CMD>,
- !cast<MxEncOpMode>("MxOpMode"#DST_TYPE.Size#DST_TYPE.RLet#"EA"),
- REG,
- !cast<MxEncEA>("MxEncEA"#SRC_TYPE.RLet#"_2"),
- MxExtEmpty>>;
-
-/// This Op is similar to the one above except it uses reversed opmode, some
-/// commands(e.g. eor) do not support dEA or rEA modes and require EAd for
-/// register only operations.
-/// NOTE when using dd commands it is irrelevant which opmode to use(as it seems)
-/// but some opcodes support address register and some do not which creates this
-/// mess.
-class MxBiArOp_RFRR_EAd<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
- : MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, TYPE.ROp:$opd),
- MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.VT:$opd))],
- MxArithEncoding<MxBead4Bits<CMD>,
- !cast<MxEncOpMode>("MxOpMode"#TYPE.Size#"EAd"),
- MxBeadDReg<2>, MxEncEAd_0, MxExtEmpty>>;
// $reg <- $reg op $imm
-class MxBiArOp_RFRI_xEA<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
+class MxBiArOp_R_RI_xEA<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
: MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, TYPE.IOp:$opd),
MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.IPat:$opd))],
- MxArithEncoding<MxBead4Bits<CMD>,
- !cast<MxEncOpMode>("MxOpMode"#TYPE.Size#TYPE.RLet#"EA"),
- MxBeadDReg<0>, MxEncEAi,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_2")>>;
+ [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.IPat:$opd))]> {
+ let Inst = (ascend
+ (descend CMD, (operand "$dst", 3),
+ !cast<MxOpModeEncoding>("MxOpMode"#TYPE.Size#"_"#TYPE.RLet#"_EA").Value,
+ MxEncAddrMode_i<"opd", TYPE.Size>.EA),
+ MxEncAddrMode_i<"opd", TYPE.Size>.Supplement
+ );
+}
// Again, there are two ways to write an immediate to a Dn register: either the dEA
// opmode or the *I encoding, and again some instructions also support address
// registers and some do not.
-class MxBiArOp_RFRI<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
+class MxBiArOp_R_RI<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
: MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, TYPE.IOp:$opd),
MN#"i."#TYPE.Prefix#"\t$opd, $dst",
- [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.IPat:$opd))],
- MxArithImmEncoding<MxBead4Bits<CMD>, !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- !cast<MxEncEA>("MxEncEA"#TYPE.RLet#"_0"), MxExtEmpty,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_2")>>;
-
-let mayLoad = 1 in
-class MxBiArOp_RFRM<string MN, SDNode NODE, MxType TYPE, MxOperand OPD, ComplexPattern PAT,
- bits<4> CMD, MxEncEA EA, MxEncExt EXT>
- : MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, OPD:$opd),
- MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, (TYPE.Load PAT:$opd)))],
- MxArithEncoding<MxBead4Bits<CMD>,
- !cast<MxEncOpMode>("MxOpMode"#TYPE.Size#TYPE.RLet#"EA"),
- MxBeadDReg<0>, EA, EXT>>;
-
+ [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.IPat:$opd))]> {
+ let Inst = (ascend
+ (descend 0b0000, CMD,
+ !cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ // The destination cannot be an address register, so this is always
+ // the MODE for data register direct mode.
+ /*MODE*/0b000,
+ /*REGISTER*/(operand "$dst", 3)),
+ // Source (i.e. immediate value) encoding
+ MxEncAddrMode_i<"opd", TYPE.Size>.Supplement
+ );
+}
} // Constraints
let mayLoad = 1, mayStore = 1 in {
// FIXME MxBiArOp_FMR/FMI cannot consume CCR from MxAdd/MxSub, which leads to
// MxAdd surviving the match and a subsequent mismatch.
-class MxBiArOp_FMR<string MN, MxType TYPE, MxOperand MEMOpd,
- bits<4> CMD, MxEncEA EA, MxEncExt EXT>
+class MxBiArOp_MR<string MN, MxType TYPE,
+ MxOperand MEMOpd, bits<4> CMD, MxEncMemOp DST_ENC>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.ROp:$opd),
- MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [],
- MxArithEncoding<MxBead4Bits<CMD>,
- !cast<MxEncOpMode>("MxOpMode"#TYPE.Size#"EA"#TYPE.RLet),
- MxBeadDReg<1>, EA, EXT>>;
+ MN#"."#TYPE.Prefix#"\t$opd, $dst", []> {
+ let Inst = (ascend
+ (descend CMD, (operand "$opd", 3),
+ !cast<MxOpModeEncoding>("MxOpMode"#TYPE.Size#"_EA_"#TYPE.RLet).Value,
+ DST_ENC.EA),
+ DST_ENC.Supplement
+ );
+}
-class MxBiArOp_FMI<string MN, MxType TYPE, MxOperand MEMOpd,
- bits<4> CMD, MxEncEA MEMEA, MxEncExt MEMExt>
+class MxBiArOp_MI<string MN, MxType TYPE,
+ MxOperand MEMOpd, bits<4> CMD, MxEncMemOp DST_ENC>
: MxInst<(outs), (ins MEMOpd:$dst, TYPE.IOp:$opd),
- MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [],
- MxArithImmEncoding<MxBead4Bits<CMD>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MEMEA, MEMExt,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_1")>>;
+ MN#"."#TYPE.Prefix#"\t$opd, $dst", []> {
+ let Inst = (ascend
+ (descend 0b0000, CMD,
+ !cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ DST_ENC.EA),
+ // Source (i.e. immediate value) encoding
+ MxEncAddrMode_i<"opd", TYPE.Size>.Supplement,
+ // Destination encoding
+ DST_ENC.Supplement
+ );
+}
} // mayLoad, mayStore
} // Defs = [CCR]
multiclass MxBiArOp_DF<string MN, SDNode NODE, bit isComm,
bits<4> CMD, bits<4> CMDI> {
- // op $mem, $reg
- def NAME#"8dk" : MxBiArOp_RFRM<MN, NODE, MxType8d, MxType8.KOp, MxType8.KPat,
- CMD, MxEncEAk, MxExtBrief_2>;
- def NAME#"16dk" : MxBiArOp_RFRM<MN, NODE, MxType16d, MxType16.KOp, MxType16.KPat,
- CMD, MxEncEAk, MxExtBrief_2>;
- def NAME#"32dk" : MxBiArOp_RFRM<MN, NODE, MxType32d, MxType32.KOp, MxType32.KPat,
- CMD, MxEncEAk, MxExtBrief_2>;
-
- def NAME#"8dq" : MxBiArOp_RFRM<MN, NODE, MxType8d, MxType8.QOp, MxType8.QPat,
- CMD, MxEncEAq, MxExtI16_2>;
- def NAME#"16dq" : MxBiArOp_RFRM<MN, NODE, MxType16d, MxType16.QOp, MxType16.QPat,
- CMD, MxEncEAq, MxExtI16_2>;
- def NAME#"32dq" : MxBiArOp_RFRM<MN, NODE, MxType32d, MxType32.QOp, MxType32.QPat,
- CMD, MxEncEAq, MxExtI16_2>;
-
- def NAME#"8dp" : MxBiArOp_RFRM<MN, NODE, MxType8d, MxType8.POp, MxType8.PPat,
- CMD, MxEncEAp_2, MxExtI16_2>;
- def NAME#"16dp" : MxBiArOp_RFRM<MN, NODE, MxType16d, MxType16.POp, MxType16.PPat,
- CMD, MxEncEAp_2, MxExtI16_2>;
- def NAME#"32dp" : MxBiArOp_RFRM<MN, NODE, MxType32d, MxType32.POp, MxType32.PPat,
- CMD, MxEncEAp_2, MxExtI16_2>;
-
- def NAME#"8df" : MxBiArOp_RFRM<MN, NODE, MxType8d, MxType8.FOp, MxType8.FPat,
- CMD, MxEncEAf_2, MxExtBrief_2>;
- def NAME#"16df" : MxBiArOp_RFRM<MN, NODE, MxType16d, MxType16.FOp, MxType16.FPat,
- CMD, MxEncEAf_2, MxExtBrief_2>;
- def NAME#"32df" : MxBiArOp_RFRM<MN, NODE, MxType32d, MxType32.FOp, MxType32.FPat,
- CMD, MxEncEAf_2, MxExtBrief_2>;
-
- def NAME#"8dj" : MxBiArOp_RFRM<MN, NODE, MxType8d, MxType8.JOp, MxType8.JPat,
- CMD, MxEncEAj_2, MxExtEmpty>;
- def NAME#"16dj" : MxBiArOp_RFRM<MN, NODE, MxType16d, MxType16.JOp, MxType16.JPat,
- CMD, MxEncEAj_2, MxExtEmpty>;
- def NAME#"32dj" : MxBiArOp_RFRM<MN, NODE, MxType32d, MxType32.JOp, MxType32.JPat,
- CMD, MxEncEAj_2, MxExtEmpty>;
-
- // op $imm, $reg
- def NAME#"8di" : MxBiArOp_RFRI_xEA<MN, NODE, MxType8d, CMD>;
- def NAME#"16di" : MxBiArOp_RFRI_xEA<MN, NODE, MxType16d, CMD>;
- def NAME#"32di" : MxBiArOp_RFRI_xEA<MN, NODE, MxType32d, CMD>;
-
- // op $reg, $mem
- def NAME#"8pd" : MxBiArOp_FMR<MN, MxType8d, MxType8.POp,
- CMD, MxEncEAp_0, MxExtI16_0>;
- def NAME#"16pd" : MxBiArOp_FMR<MN, MxType16d, MxType16.POp,
- CMD, MxEncEAp_0, MxExtI16_0>;
- def NAME#"32pd" : MxBiArOp_FMR<MN, MxType32d, MxType32.POp,
- CMD, MxEncEAp_0, MxExtI16_0>;
-
- def NAME#"8fd" : MxBiArOp_FMR<MN, MxType8d, MxType8.FOp,
- CMD, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"16fd" : MxBiArOp_FMR<MN, MxType16d, MxType16.FOp,
- CMD, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"32fd" : MxBiArOp_FMR<MN, MxType32d, MxType32.FOp,
- CMD, MxEncEAf_0, MxExtBrief_0>;
-
- def NAME#"8jd" : MxBiArOp_FMR<MN, MxType8d, MxType8.JOp,
- CMD, MxEncEAj_0, MxExtEmpty>;
- def NAME#"16jd" : MxBiArOp_FMR<MN, MxType16d, MxType16.JOp,
- CMD, MxEncEAj_0, MxExtEmpty>;
- def NAME#"32jd" : MxBiArOp_FMR<MN, MxType32d, MxType32.JOp,
- CMD, MxEncEAj_0, MxExtEmpty>;
-
- // op $imm, $mem
- def NAME#"8pi" : MxBiArOp_FMI<MN, MxType8, MxType8.POp,
- CMDI, MxEncEAp_0, MxExtI16_0>;
- def NAME#"16pi" : MxBiArOp_FMI<MN, MxType16, MxType16.POp,
- CMDI, MxEncEAp_0, MxExtI16_0>;
- def NAME#"32pi" : MxBiArOp_FMI<MN, MxType32, MxType32.POp,
- CMDI, MxEncEAp_0, MxExtI16_0>;
-
- def NAME#"8fi" : MxBiArOp_FMI<MN, MxType8, MxType8.FOp,
- CMDI, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"16fi" : MxBiArOp_FMI<MN, MxType16, MxType16.FOp,
- CMDI, MxEncEAf_0, MxExtBrief_0>;
- def NAME#"32fi" : MxBiArOp_FMI<MN, MxType32, MxType32.FOp,
- CMDI, MxEncEAf_0, MxExtBrief_0>;
-
- def NAME#"8ji" : MxBiArOp_FMI<MN, MxType8, MxType8.JOp,
- CMDI, MxEncEAj_0, MxExtEmpty>;
- def NAME#"16ji" : MxBiArOp_FMI<MN, MxType16, MxType16.JOp,
- CMDI, MxEncEAj_0, MxExtEmpty>;
- def NAME#"32ji" : MxBiArOp_FMI<MN, MxType32, MxType32.JOp,
- CMDI, MxEncEAj_0, MxExtEmpty>;
-
- def NAME#"16dr" : MxBiArOp_RFRR_xEA<MN, NODE, MxType16d, MxType16r,
- CMD, MxBeadDReg<0>>;
- def NAME#"32dr" : MxBiArOp_RFRR_xEA<MN, NODE, MxType32d, MxType32r,
- CMD, MxBeadDReg<0>>;
-
- let isCommutable = isComm in {
-
- def NAME#"8dd" : MxBiArOp_RFRR_xEA<MN, NODE, MxType8d, MxType8d,
- CMD, MxBeadDReg<0>>;
- def NAME#"16dd" : MxBiArOp_RFRR_xEA<MN, NODE, MxType16d, MxType16d,
- CMD, MxBeadDReg<0>>;
- def NAME#"32dd" : MxBiArOp_RFRR_xEA<MN, NODE, MxType32d, MxType32d,
- CMD, MxBeadDReg<0>>;
-
- } // isComm
+ foreach SZ = [8, 16, 32] in {
+ // op $mem, $reg
+ def NAME#SZ#"dk" : MxBiArOp_R_RM<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).KOp,
+ !cast<MxType>("MxType"#SZ).KPat,
+ CMD, MxEncAddrMode_k<"opd">>;
+
+ def NAME#SZ#"dq" : MxBiArOp_R_RM<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).QOp,
+ !cast<MxType>("MxType"#SZ).QPat,
+ CMD, MxEncAddrMode_q<"opd">>;
+
+ def NAME#SZ#"dp" : MxBiArOp_R_RM<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).POp,
+ !cast<MxType>("MxType"#SZ).PPat,
+ CMD, MxEncAddrMode_p<"opd">>;
+
+ def NAME#SZ#"df" : MxBiArOp_R_RM<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).FOp,
+ !cast<MxType>("MxType"#SZ).FPat,
+ CMD, MxEncAddrMode_f<"opd">>;
+
+ def NAME#SZ#"dj" : MxBiArOp_R_RM<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).JOp,
+ !cast<MxType>("MxType"#SZ).JPat,
+ CMD, MxEncAddrMode_j<"opd">>;
+ // op $imm, $reg
+ def NAME#SZ#"di" : MxBiArOp_R_RI_xEA<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ CMD>;
+ // op $reg, $mem
+ def NAME#SZ#"pd" : MxBiArOp_MR<MN,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).POp,
+ CMD, MxEncAddrMode_p<"dst">>;
+
+ def NAME#SZ#"fd" : MxBiArOp_MR<MN,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).FOp,
+ CMD, MxEncAddrMode_f<"dst">>;
+
+ def NAME#SZ#"jd" : MxBiArOp_MR<MN,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ).JOp,
+ CMD, MxEncAddrMode_j<"dst">>;
+ // op $imm, $mem
+ def NAME#SZ#"pi" : MxBiArOp_MI<MN,
+ !cast<MxType>("MxType"#SZ),
+ !cast<MxType>("MxType"#SZ).POp,
+ CMDI, MxEncAddrMode_p<"dst">>;
+
+ def NAME#SZ#"fi" : MxBiArOp_MI<MN,
+ !cast<MxType>("MxType"#SZ),
+ !cast<MxType>("MxType"#SZ).FOp,
+ CMDI, MxEncAddrMode_f<"dst">>;
+
+ def NAME#SZ#"ji" : MxBiArOp_MI<MN,
+ !cast<MxType>("MxType"#SZ),
+ !cast<MxType>("MxType"#SZ).JOp,
+ CMDI, MxEncAddrMode_j<"dst">>;
+ // op $reg, $reg
+ let isCommutable = isComm in
+ def NAME#SZ#"dd" : MxBiArOp_R_RR_xEA<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ#"d"),
+ CMD>;
+ } // foreach SZ
+
+ foreach SZ = [16, 32] in
+ def NAME#SZ#"dr" : MxBiArOp_R_RR_xEA<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ !cast<MxType>("MxType"#SZ#"r"),
+ CMD>;
} // MxBiArOp_DF
@@ -284,25 +281,28 @@ multiclass MxBiArOp_DF<string MN, SDNode NODE, bit isComm,
let Pattern = [(null_frag)] in
multiclass MxBiArOp_AF<string MN, SDNode NODE, bits<4> CMD> {
- def NAME#"32ak" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.KOp, MxType32.KPat,
- CMD, MxEncEAk, MxExtBrief_2>;
- def NAME#"32aq" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.QOp, MxType32.QPat,
- CMD, MxEncEAq, MxExtI16_2>;
- def NAME#"32af" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.FOp, MxType32.FPat,
- CMD, MxEncEAf_2, MxExtBrief_2>;
- def NAME#"32ap" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.POp, MxType32.PPat,
- CMD, MxEncEAp_2, MxExtI16_2>;
- def NAME#"32aj" : MxBiArOp_RFRM<MN, NODE, MxType32a, MxType32.JOp, MxType32.JPat,
- CMD, MxEncEAj_2, MxExtEmpty>;
- def NAME#"32ai" : MxBiArOp_RFRI_xEA<MN, NODE, MxType32a, CMD>;
-
- def NAME#"32ar" : MxBiArOp_RFRR_xEA<MN, NODE, MxType32a, MxType32r,
- CMD, MxBeadReg<0>>;
+ def NAME#"32ak" : MxBiArOp_R_RM<MN, NODE, MxType32a, MxType32.KOp, MxType32.KPat,
+ CMD, MxEncAddrMode_k<"opd">>;
+ def NAME#"32aq" : MxBiArOp_R_RM<MN, NODE, MxType32a, MxType32.QOp, MxType32.QPat,
+ CMD, MxEncAddrMode_q<"opd">>;
+ def NAME#"32af" : MxBiArOp_R_RM<MN, NODE, MxType32a, MxType32.FOp, MxType32.FPat,
+ CMD, MxEncAddrMode_f<"opd">>;
+ def NAME#"32ap" : MxBiArOp_R_RM<MN, NODE, MxType32a, MxType32.POp, MxType32.PPat,
+ CMD, MxEncAddrMode_p<"opd">>;
+ def NAME#"32aj" : MxBiArOp_R_RM<MN, NODE, MxType32a, MxType32.JOp, MxType32.JPat,
+ CMD, MxEncAddrMode_j<"opd">>;
+ def NAME#"32ai" : MxBiArOp_R_RI_xEA<MN, NODE, MxType32a, CMD>;
+
+ def NAME#"32ar" : MxBiArOp_R_RR_xEA<MN, NODE, MxType32a, MxType32r, CMD>;
} // MxBiArOp_AF
// NOTE These naturally produce CCR
+//===----------------------------------------------------------------------===//
+// Add/Sub
+//===----------------------------------------------------------------------===//
+
defm ADD : MxBiArOp_DF<"add", MxAdd, 1, 0xD, 0x6>;
defm ADD : MxBiArOp_AF<"adda", MxAdd, 0xD>;
defm SUB : MxBiArOp_DF<"sub", MxSub, 0, 0x9, 0x4>;
@@ -312,26 +312,42 @@ defm SUB : MxBiArOp_AF<"suba", MxSub, 0x9>;
let Uses = [CCR], Defs = [CCR] in {
let Constraints = "$src = $dst" in {
+/// Encoding for Extended forms
+/// ------------------------------------------------------
+/// F E D C | B A 9 | 8 | 7 6 | 5 4 | 3 | 2 1 0
+/// ------------------------------------------------------
+/// x x x x | REG Rx | 1 | SIZE | 0 0 | M | REG Ry
+/// ------------------------------------------------------
+/// Rx - destination
+/// Ry - source
+/// M - address mode switch
+
// $reg, ccr <- $reg op $reg op ccr
-class MxBiArOp_RFRRF<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
+class MxBiArOp_R_RRX<string MN, SDNode NODE, MxType TYPE, bits<4> CMD>
: MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src, TYPE.ROp:$opd),
MN#"."#TYPE.Prefix#"\t$opd, $dst",
- [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.VT:$opd, CCR))],
- MxArithXEncoding<MxBead4Bits<CMD>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MxBead1Bit<0>, MxBeadDReg<2>, MxBeadDReg<0>>>;
-
+ [(set TYPE.VT:$dst, CCR, (NODE TYPE.VT:$src, TYPE.VT:$opd, CCR))]> {
+ let Inst = (descend CMD,
+ // Destination register
+ (operand "$dst", 3),
+ 0b1,
+ // SIZE
+ !cond(!eq(TYPE.Size, 8): 0b00,
+ !eq(TYPE.Size, 16): 0b01,
+ !eq(TYPE.Size, 32): 0b10),
+ 0b00, /*R/M*/0b0,
+ // Source register
+ (operand "$opd", 3)
+ );
+}
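The same kind of hand packing confirms the X-form layout above. The 0b1101 command nibble is assumed from the M68k addx encoding, since the defm that supplies CMD to this class is outside the hunk.

#include <cstdint>
#include <cstdio>

// Single word of "addx.l %d1, %d2" following MxBiArOp_R_RRX:
// CMD | Rx (dst) | 1 | SIZE | 00 | R/M | Ry (src).
int main() {
  unsigned cmd = 0b1101; // assumed addx command nibble
  unsigned rx = 2;       // destination %d2
  unsigned size = 0b10;  // 32-bit operation
  unsigned rm = 0b0;     // data-register (not predecrement) variant
  unsigned ry = 1;       // source %d1
  unsigned word = (cmd << 12) | (rx << 9) | (1u << 8) | (size << 6) |
                  (0b00 << 4) | (rm << 3) | ry;
  std::printf("addx.l %%d1, %%d2 -> 0x%04X\n", word); // expected: 0xD581
  return 0;
}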
} // Constraints
} // Uses, Defs
multiclass MxBiArOp_RFF<string MN, SDNode NODE, bit isComm, bits<4> CMD> {
let isCommutable = isComm in {
-
- def NAME#"8dd" : MxBiArOp_RFRRF<MN, NODE, MxType8d, CMD>;
- def NAME#"16dd" : MxBiArOp_RFRRF<MN, NODE, MxType16d, CMD>;
- def NAME#"32dd" : MxBiArOp_RFRRF<MN, NODE, MxType32d, CMD>;
-
+ foreach SZ = [8, 16, 32] in
+ def NAME#SZ#"dd" : MxBiArOp_R_RRX<MN, NODE, !cast<MxType>("MxType"#SZ#"d"), CMD>;
} // isComm
} // MxBiArOp_RFF
@@ -349,19 +365,16 @@ defm AND : MxBiArOp_DF<"and", MxAnd, 1, 0xC, 0x2>;
defm OR : MxBiArOp_DF<"or", MxOr, 1, 0x8, 0x0>;
multiclass MxBiArOp_DF_EAd<string MN, SDNode NODE, bits<4> CMD, bits<4> CMDI> {
-
- let isCommutable = 1 in {
-
- def NAME#"8dd" : MxBiArOp_RFRR_EAd<MN, NODE, MxType8d, CMD>;
- def NAME#"16dd" : MxBiArOp_RFRR_EAd<MN, NODE, MxType16d, CMD>;
- def NAME#"32dd" : MxBiArOp_RFRR_EAd<MN, NODE, MxType32d, CMD>;
-
- } // isCommutable = 1
-
- def NAME#"8di" : MxBiArOp_RFRI<MN, NODE, MxType8d, CMDI>;
- def NAME#"16di" : MxBiArOp_RFRI<MN, NODE, MxType16d, CMDI>;
- def NAME#"32di" : MxBiArOp_RFRI<MN, NODE, MxType32d, CMDI>;
-
+ foreach SZ = [8, 16, 32] in {
+ let isCommutable = 1 in
+ def NAME#SZ#"dd" : MxBiArOp_R_RR_EAd<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ CMD>;
+
+ def NAME#SZ#"di" : MxBiArOp_R_RI<MN, NODE,
+ !cast<MxType>("MxType"#SZ#"d"),
+ CMDI>;
+ } // foreach SZ
} // MxBiArOp_DF_EAd
defm XOR : MxBiArOp_DF_EAd<"eor", MxXor, 0xB, 0xA>;
@@ -372,84 +385,112 @@ defm XOR : MxBiArOp_DF_EAd<"eor", MxXor, 0xB, 0xA>;
//===----------------------------------------------------------------------===//
let Defs = [CCR] in {
-class MxCmp_RR<MxType LHS_TYPE, MxType RHS_TYPE = LHS_TYPE,
- MxBead REG = MxBeadDReg<1>>
+class MxCmp_RR<MxType LHS_TYPE, MxType RHS_TYPE = LHS_TYPE>
: MxInst<(outs), (ins LHS_TYPE.ROp:$lhs, RHS_TYPE.ROp:$rhs),
"cmp."#RHS_TYPE.Prefix#"\t$lhs, $rhs",
- [(set CCR, (MxCmp LHS_TYPE.VT:$lhs, RHS_TYPE.VT:$rhs))],
- MxArithEncoding<MxBead4Bits<0xB>,
- !cast<MxEncOpMode>("MxOpMode"#RHS_TYPE.Size#RHS_TYPE.RLet#"EA"),
- REG,
- !cast<MxEncEA>("MxEncEA"#LHS_TYPE.RLet#"_0"),
- MxExtEmpty>>;
+ [(set CCR, (MxCmp LHS_TYPE.VT:$lhs, RHS_TYPE.VT:$rhs))]> {
+ let Inst = (descend 0b1011,
+ // REGISTER
+ (operand "$rhs", 3),
+ // OPMODE
+ !cast<MxOpModeEncoding>("MxOpMode"#RHS_TYPE.Size#"_"#RHS_TYPE.RLet#"_EA").Value,
+ // MODE without last bit
+ 0b00,
+ // REGISTER prefixed by D/A bit
+ (operand "$lhs", 4)
+ );
+}
class MxCmp_RI<MxType TYPE>
: MxInst<(outs), (ins TYPE.IOp:$imm, TYPE.ROp:$reg),
"cmpi."#TYPE.Prefix#"\t$imm, $reg",
- [(set CCR, (MxCmp TYPE.IPat:$imm, TYPE.VT:$reg))],
- MxArithImmEncoding<MxBead4Bits<0xC>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MxEncEAd_1, MxExtEmpty,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_0")>>;
+ [(set CCR, (MxCmp TYPE.IPat:$imm, TYPE.VT:$reg))]> {
+ let Inst = (ascend
+ (descend 0b00001100,
+ !cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ // The destination cannot be an address register, so this is always
+ // the MODE for data register direct mode.
+ /*MODE*/0b000,
+ /*REGISTER*/(operand "$reg", 3)),
+ // Source (i.e. immediate value) encoding
+ MxEncAddrMode_i<"imm", TYPE.Size>.Supplement
+ );
+}
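For the RI form above, the whole instruction is the fixed 0b00001100 prefix plus the size and data-register fields, followed by the immediate words appended by the MxEncAddrMode_i supplement (high half first for 32-bit values). A rough, hand-packed word stream for one example:

#include <cstdint>
#include <cstdio>

// Word stream of "cmpi.l #0x12345678, %d3" per MxCmp_RI.
int main() {
  unsigned size = 0b10; // 32-bit compare
  unsigned reg = 3;     // %d3
  uint32_t imm = 0x12345678;
  unsigned words[3] = {
      (0b00001100u << 8) | (size << 6) | (0b000 << 3) | reg, // opcode word
      imm >> 16,                                             // immediate, high half
      imm & 0xFFFF};                                         // immediate, low half
  std::printf("%04X %04X %04X\n", words[0], words[1], words[2]); // 0C83 1234 5678
  return 0;
}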
let mayLoad = 1 in {
class MxCmp_MI<MxType TYPE, MxOperand MEMOpd, ComplexPattern MEMPat,
- MxEncEA EA, MxEncExt EXT>
+ MxEncMemOp MEM_ENC>
: MxInst<(outs), (ins TYPE.IOp:$imm, MEMOpd:$mem),
"cmpi."#TYPE.Prefix#"\t$imm, $mem",
- [(set CCR, (MxCmp TYPE.IPat:$imm, (load MEMPat:$mem)))],
- MxArithImmEncoding<MxBead4Bits<0xC>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- EA, EXT,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_0")>>;
+ [(set CCR, (MxCmp TYPE.IPat:$imm, (load MEMPat:$mem)))]> {
+ let Inst = (ascend
+ (descend 0b00001100,
+ !cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ MEM_ENC.EA),
+ // Source (i.e. immediate value) encoding
+ MxEncAddrMode_i<"imm", TYPE.Size>.Supplement,
+ // Destination (i.e. memory operand) encoding
+ MEM_ENC.Supplement
+ );
+}
+// FIXME: What about abs.W?
class MxCmp_BI<MxType TYPE>
: MxInst<(outs), (ins TYPE.IOp:$imm, MxAL32:$abs),
"cmpi."#TYPE.Prefix#"\t$imm, $abs",
[(set CCR, (MxCmp TYPE.IPat:$imm,
- (load (i32 (MxWrapper tglobaladdr:$abs)))))],
- MxArithImmEncoding<MxBead4Bits<0xC>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MxEncEAb, MxExtI32_1,
- !cast<MxEncExt>("MxExtI"#TYPE.Size#"_0")>>;
+ (load (i32 (MxWrapper tglobaladdr:$abs)))))]> {
+ defvar AbsEncoding = MxEncAddrMode_abs<"abs", true>;
+ let Inst = (ascend
+ (descend 0b00001100,
+ !cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ AbsEncoding.EA),
+ // Source (i.e. immediate value) encoding
+ MxEncAddrMode_i<"imm", TYPE.Size>.Supplement,
+ // Destination (i.e. memory operand) encoding
+ AbsEncoding.Supplement
+ );
+}
class MxCmp_RM<MxType TYPE, MxOperand MEMOpd, ComplexPattern MEMPat,
- MxEncEA EA, MxEncExt EXT>
+ MxEncMemOp MEM_ENC>
: MxInst<(outs), (ins TYPE.ROp:$reg, MEMOpd:$mem),
"cmp."#TYPE.Prefix#"\t$mem, $reg",
- [(set CCR, (MxCmp (load MEMPat:$mem), TYPE.ROp:$reg))],
- MxArithEncoding<MxBead4Bits<0xB>,
- !cast<MxEncOpMode>("MxOpMode"#TYPE.Size#"dEA"),
- MxBeadDReg<0>, EA, EXT>>;
+ [(set CCR, (MxCmp (load MEMPat:$mem), TYPE.ROp:$reg))]> {
+ let Inst = (ascend
+ (descend 0b1011,
+ // REGISTER
+ (operand "$reg", 3),
+ // OPMODE
+ !cast<MxOpModeEncoding>("MxOpMode"#TYPE.Size#"_d_EA").Value,
+ MEM_ENC.EA),
+ MEM_ENC.Supplement
+ );
+}
} // let mayLoad = 1
} // let Defs = [CCR]
multiclass MMxCmp_RM<MxType TYPE> {
- def NAME#TYPE.KOp.Letter : MxCmp_RM<TYPE, TYPE.KOp, TYPE.KPat, MxEncEAk,
- MxExtBrief_1>;
- def NAME#TYPE.QOp.Letter : MxCmp_RM<TYPE, TYPE.QOp, TYPE.QPat, MxEncEAq,
- MxExtI16_1>;
- def NAME#TYPE.POp.Letter : MxCmp_RM<TYPE, TYPE.POp, TYPE.PPat, MxEncEAp_1,
- MxExtI16_1>;
- def NAME#TYPE.FOp.Letter : MxCmp_RM<TYPE, TYPE.FOp, TYPE.FPat, MxEncEAf_1,
- MxExtBrief_1>;
- def NAME#TYPE.JOp.Letter : MxCmp_RM<TYPE, TYPE.JOp, TYPE.JPat, MxEncEAj_1,
- MxExtEmpty>;
+ def NAME#TYPE.KOp.Letter : MxCmp_RM<TYPE, TYPE.KOp, TYPE.KPat, MxEncAddrMode_k<"mem">>;
+ def NAME#TYPE.QOp.Letter : MxCmp_RM<TYPE, TYPE.QOp, TYPE.QPat, MxEncAddrMode_q<"mem">>;
+ def NAME#TYPE.POp.Letter : MxCmp_RM<TYPE, TYPE.POp, TYPE.PPat, MxEncAddrMode_p<"mem">>;
+ def NAME#TYPE.FOp.Letter : MxCmp_RM<TYPE, TYPE.FOp, TYPE.FPat, MxEncAddrMode_f<"mem">>;
+ def NAME#TYPE.JOp.Letter : MxCmp_RM<TYPE, TYPE.JOp, TYPE.JPat, MxEncAddrMode_j<"mem">>;
}
multiclass MMxCmp_MI<MxType TYPE> {
- def NAME#TYPE.KOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.KOp, TYPE.KPat, MxEncEAk,
- MxExtBrief_1>;
- def NAME#TYPE.QOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.QOp, TYPE.QPat, MxEncEAq,
- MxExtI16_1>;
- def NAME#TYPE.POp.Letter#"i" : MxCmp_MI<TYPE, TYPE.POp, TYPE.PPat, MxEncEAp_1,
- MxExtI16_1>;
- def NAME#TYPE.FOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.FOp, TYPE.FPat, MxEncEAf_1,
- MxExtBrief_1>;
- def NAME#TYPE.JOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.JOp, TYPE.JPat, MxEncEAj_1,
- MxExtEmpty>;
+ def NAME#TYPE.KOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.KOp, TYPE.KPat,
+ MxEncAddrMode_k<"mem">>;
+ def NAME#TYPE.QOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.QOp, TYPE.QPat,
+ MxEncAddrMode_q<"mem">>;
+ def NAME#TYPE.POp.Letter#"i" : MxCmp_MI<TYPE, TYPE.POp, TYPE.PPat,
+ MxEncAddrMode_p<"mem">>;
+ def NAME#TYPE.FOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.FOp, TYPE.FPat,
+ MxEncAddrMode_f<"mem">>;
+ def NAME#TYPE.JOp.Letter#"i" : MxCmp_MI<TYPE, TYPE.JOp, TYPE.JPat,
+ MxEncAddrMode_j<"mem">>;
}
foreach S = [8, 16, 32] in {
@@ -478,25 +519,31 @@ defm CMP32 : MMxCmp_MI<MxType32d>;
// EXT
//===----------------------------------------------------------------------===//
-def MxExtOpmode_wb : MxBead3Bits<0b010>;
-def MxExtOpmode_lw : MxBead3Bits<0b011>;
-def MxExtOpmode_lb : MxBead3Bits<0b111>;
-
/// ---------------------------------------------------
/// F E D C B A 9 | 8 7 6 | 5 4 3 | 2 1 0
/// ---------------------------------------------------
/// 0 1 0 0 1 0 0 | OPMODE | 0 0 0 | REG
/// ---------------------------------------------------
-class MxExtEncoding<MxBead3Bits OPMODE>
- : MxEncoding<MxBeadDReg<0>, MxBead3Bits<0b000>, OPMODE,
- MxBead3Bits<0b100>, MxBead4Bits<0b0100>>;
-
let Defs = [CCR] in
let Constraints = "$src = $dst" in
class MxExt<MxType TO, MxType FROM>
: MxInst<(outs TO.ROp:$dst), (ins TO.ROp:$src),
- "ext."#TO.Prefix#"\t$src", [],
- MxExtEncoding<!cast<MxBead3Bits>("MxExtOpmode_"#TO.Prefix#FROM.Prefix)>>;
+ "ext."#TO.Prefix#"\t$src", []> {
+ let Inst = (descend 0b0100100,
+ // OPMODE
+ !cond(
+ // byte -> word
+ !and(!eq(FROM.Size, 8), !eq(TO.Size, 16)): 0b010,
+ // word -> long
+ !and(!eq(FROM.Size, 16), !eq(TO.Size, 32)): 0b011,
+ // byte -> long
+ !and(!eq(FROM.Size, 8), !eq(TO.Size, 32)): 0b111
+ ),
+ 0b000,
+ // REGISTER
+ (operand "$src", 3)
+ );
+}
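The !cond above selects one of three hardware opmodes from the (FROM, TO) size pair. Packing one case by hand shows where each field lands (the register number is arbitrary):

#include <cstdint>
#include <cstdio>

// Single word of "ext.w %d3" per MxExt: 0100100 | OPMODE | 000 | REG.
int main() {
  unsigned prefix = 0b0100100; // fixed 7-bit prefix
  unsigned opmode = 0b010;     // byte -> word (0b011 = word -> long, 0b111 = byte -> long)
  unsigned reg = 3;            // %d3
  unsigned word = (prefix << 9) | (opmode << 6) | (0b000 << 3) | reg;
  std::printf("ext.w %%d3 -> 0x%04X\n", word); // expected: 0x4883
  return 0;
}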
def EXT16 : MxExt<MxType16d, MxType8d>;
def EXT32 : MxExt<MxType32d, MxType16d>;
@@ -511,9 +558,6 @@ def : Pat<(sext_inreg i32:$src, i8),
// DIV/MUL
//===----------------------------------------------------------------------===//
-def MxSDiMuOpmode : MxBead3Bits<0b111>;
-def MxUDiMuOpmode : MxBead3Bits<0b011>;
-
/// Word operation:
/// ----------------------------------------------------
/// F E D C | B A 9 | 8 7 6 | 5 4 3 | 2 1 0
@@ -521,40 +565,45 @@ def MxUDiMuOpmode : MxBead3Bits<0b011>;
/// | | | EFFECTIVE ADDRESS
/// x x x x | REG | OP MODE | MODE | REG
/// ----------------------------------------------------
-class MxDiMuEncoding<MxBead4Bits CMD, MxBead3Bits OPMODE, MxEncEA EA, MxEncExt EXT>
- : MxEncoding<EA.Reg, EA.DA, EA.Mode, OPMODE, MxBeadDReg<0>, CMD,
- EXT.Imm, EXT.B8, EXT.Scale, EXT.WL, EXT.DAReg>;
-
let Defs = [CCR] in {
let Constraints = "$src = $dst" in {
-// $reg <- $reg op $reg
-class MxDiMuOp_DD<string MN, bits<4> CMD, MxBead3Bits OPMODE,
+// $dreg <- $dreg op $dreg
+class MxDiMuOp_DD<string MN, bits<4> CMD, bit SIGNED = false,
MxOperand DST, MxOperand OPD>
- : MxInst<(outs DST:$dst), (ins DST:$src, OPD:$opd), MN#"\t$opd, $dst", [],
- MxDiMuEncoding<MxBead4Bits<CMD>, OPMODE, MxEncEAd_2, MxExtEmpty>>;
+ : MxInst<(outs DST:$dst), (ins DST:$src, OPD:$opd), MN#"\t$opd, $dst", []> {
+ let Inst = (descend CMD,
+ // REGISTER
+ (operand "$dst", 3),
+ !if(SIGNED, 0b111, 0b011),
+ /*MODE*/0b000, /*REGISTER*/(operand "$opd", 3)
+ );
+}
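The SIGNED flag above only toggles the opmode bits between 0b111 (divs/muls) and 0b011 (divu/mulu); the rest of the word is shared. Hand-packing one case with the 0x8 command taken from the DIV defm below (register numbers are arbitrary):

#include <cstdint>
#include <cstdio>

// Single word of "divs.w %d1, %d2" per MxDiMuOp_DD:
// CMD | destination Dn | opmode | MODE = 000 | source Dn.
int main() {
  unsigned cmd = 0x8;   // 'div' command (defm DIV below)
  unsigned dst = 2;     // %d2, the 32-bit dividend and result
  bool isSigned = true; // divs rather than divu
  unsigned src = 1;     // %d1, the 16-bit divisor
  unsigned word = (cmd << 12) | (dst << 9) | ((isSigned ? 0b111u : 0b011u) << 6) |
                  (0b000 << 3) | src;
  std::printf("divs.w %%d1, %%d2 -> 0x%04X\n", word); // expected: 0x85C1
  return 0;
}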
// $reg <- $reg op $imm
-class MxDiMuOp_DI<string MN, bits<4> CMD, MxBead3Bits OPMODE,
+class MxDiMuOp_DI<string MN, bits<4> CMD, bit SIGNED = false,
MxOperand DST, MxOperand OPD>
- : MxInst<(outs DST:$dst), (ins DST:$src, OPD:$opd), MN#"\t$opd, $dst", [],
- MxDiMuEncoding<MxBead4Bits<CMD>, OPMODE, MxEncEAi, MxExtI16_2>>;
+ : MxInst<(outs DST:$dst), (ins DST:$src, OPD:$opd), MN#"\t$opd, $dst", []> {
+ // FIXME: Support immediates with different widths.
+ defvar ImmEnc = MxEncAddrMode_i<"opd", 16>;
+ let Inst = (ascend
+ (descend CMD,
+ // REGISTER
+ (operand "$dst", 3),
+ !if(SIGNED, 0b111, 0b011), ImmEnc.EA),
+ ImmEnc.Supplement
+ );
+}
} // let Constraints
} // Defs = [CCR]
multiclass MxDiMuOp<string MN, bits<4> CMD, bit isComm = 0> {
-
let isCommutable = isComm in {
- def "S"#NAME#"d32d16" : MxDiMuOp_DD<MN#"s", CMD, MxSDiMuOpmode, MxDRD32,
- MxDRD16>;
- def "U"#NAME#"d32d16" : MxDiMuOp_DD<MN#"u", CMD, MxUDiMuOpmode, MxDRD32,
- MxDRD16>;
+ def "S"#NAME#"d32d16" : MxDiMuOp_DD<MN#"s", CMD, /*SIGNED*/true, MxDRD32, MxDRD16>;
+ def "U"#NAME#"d32d16" : MxDiMuOp_DD<MN#"u", CMD, /*SIGNED*/false, MxDRD32, MxDRD16>;
}
- def "S"#NAME#"d32i16" : MxDiMuOp_DI<MN#"s", CMD, MxSDiMuOpmode, MxDRD32,
- Mxi16imm>;
- def "U"#NAME#"d32i16" : MxDiMuOp_DI<MN#"u", CMD, MxUDiMuOpmode, MxDRD32,
- Mxi16imm>;
-
+ def "S"#NAME#"d32i16" : MxDiMuOp_DI<MN#"s", CMD, /*SIGNED*/true, MxDRD32, Mxi16imm>;
+ def "U"#NAME#"d32i16" : MxDiMuOp_DI<MN#"u", CMD, /*SIGNED*/false, MxDRD32, Mxi16imm>;
}
defm DIV : MxDiMuOp<"div", 0x8>;
@@ -697,29 +746,35 @@ def : Pat<(mulhu i16:$dst, MximmSExt16:$opd),
/// | | | EFFECTIVE ADDRESS
/// 0 1 0 0 | x x x x | SIZE | MODE | REG
/// ------------+------------+------+---------+---------
-class MxNEGEncoding<MxBead4Bits CMD, MxEncSize SIZE, MxEncEA EA, MxEncExt EXT>
- : MxEncoding<EA.Reg, EA.DA, EA.Mode, SIZE, CMD, MxBead4Bits<0b0100>,
- EXT.Imm, EXT.B8, EXT.Scale, EXT.WL, EXT.DAReg>;
-
let Defs = [CCR] in {
let Constraints = "$src = $dst" in {
class MxNeg_D<MxType TYPE>
: MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src),
"neg."#TYPE.Prefix#"\t$dst",
- [(set TYPE.VT:$dst, (ineg TYPE.VT:$src))],
- MxNEGEncoding<MxBead4Bits<0x4>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MxEncEAd_0, MxExtEmpty>>;
+ [(set TYPE.VT:$dst, (ineg TYPE.VT:$src))]> {
+ let Inst = (descend 0b01000100,
+ /*SIZE*/!cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ // MODE without last bit
+ 0b00,
+ // REGISTER prefixed by D/A bit
+ (operand "$dst", 4)
+ );
+}
let Uses = [CCR] in {
class MxNegX_D<MxType TYPE>
: MxInst<(outs TYPE.ROp:$dst), (ins TYPE.ROp:$src),
"negx."#TYPE.Prefix#"\t$dst",
- [(set TYPE.VT:$dst, (MxSubX 0, TYPE.VT:$src, CCR))],
- MxNEGEncoding<MxBead4Bits<0x0>,
- !cast<MxEncSize>("MxEncSize"#TYPE.Size),
- MxEncEAd_0, MxExtEmpty>>;
+ [(set TYPE.VT:$dst, (MxSubX 0, TYPE.VT:$src, CCR))]> {
+ let Inst = (descend 0b01000000,
+ /*SIZE*/!cast<MxNewEncSize>("MxNewEncSize"#TYPE.Size).Value,
+ // MODE without last bit
+ 0b00,
+ // REGISTER prefixed by D/A bit
+ (operand "$dst", 4)
+ );
+}
}
} // let Constraints
diff --git a/llvm/lib/Target/M68k/M68kInstrFormats.td b/llvm/lib/Target/M68k/M68kInstrFormats.td
index 7e0c96a5b1f6..b3c4fdfe2f53 100644
--- a/llvm/lib/Target/M68k/M68kInstrFormats.td
+++ b/llvm/lib/Target/M68k/M68kInstrFormats.td
@@ -200,6 +200,11 @@ class MxEncEA<MxBead reg, MxBead mode, MxBead da = MxBeadIgnore> {
MxBead DA = da;
}
+class MxEncMemOp {
+ dag EA = (ascend);
+ dag Supplement = (ascend);
+}
+
// FIXME: Is there a way to factorize the addressing mode suffix (i.e.
// 'r', 'd', 'a' etc.) and use something like multiclass to replace?
def MxEncEAr_0: MxEncEA<MxBeadDAReg<0>, MxBead2Bits<0b00>>;
@@ -237,6 +242,93 @@ def MxEncEAq : MxEncEA<MxBead3Bits<0b010>, MxBead2Bits<0b11>, MxBead1Bit<1>>;
def MxEncEAk : MxEncEA<MxBead3Bits<0b011>, MxBead2Bits<0b11>, MxBead1Bit<1>>;
def MxEncEAi : MxEncEA<MxBead3Bits<0b100>, MxBead2Bits<0b11>, MxBead1Bit<1>>;
+class MxEncBriefExt<string reg_opnd, string disp_opnd,
+ bit size_w_l = false, int scale = 1> {
+ dag Value = (descend
+ // D/A + REGISTER
+ (operand "$"#reg_opnd, 4),
+ // W/L
+ size_w_l,
+ // SCALE
+ !cond(
+ !eq(scale, 1) : 0b00,
+ !eq(scale, 2) : 0b01,
+ !eq(scale, 4) : 0b10,
+ !eq(scale, 8) : 0b11
+ ),
+ 0b0,
+ // Displacement
+ (operand "$"#disp_opnd, 8)
+ );
+}
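MxEncBriefExt above is the single extension word used by the indexed modes. Hand-packing one value makes the field order easy to follow; the operand values here are invented for illustration, and the W/L bit is set the way the k/f address modes request it (/*W/L*/true).

#include <cstdint>
#include <cstdio>

// Brief extension word: D/A+REG (4) | W/L (1) | SCALE (2) | 0 | disp (8).
int main() {
  unsigned da_reg = 0b0011; // %d3: D/A = 0 (data register), register number 3
  unsigned wl = 1;          // long index
  unsigned scale = 0b00;    // scale factor 1
  unsigned disp = 0x12;     // sample 8-bit displacement
  unsigned ext = (da_reg << 12) | (wl << 11) | (scale << 9) | (0u << 8) | disp;
  std::printf("brief extension word: 0x%04X\n", ext); // expected: 0x3812
  return 0;
}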
+
+class MxEncAddrMode_k<string opnd_name> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b111,
+ /*REGISTER*/0b011);
+
+ let Supplement = MxEncBriefExt<opnd_name#".index", opnd_name#".disp",
+ /*W/L*/true>.Value;
+}
+
+class MxEncAddrMode_q<string opnd_name> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b111,
+ /*REGISTER*/0b010);
+
+ // 16-bit Displacement
+ let Supplement = (operand "$"#opnd_name, 16);
+}
+
+class MxEncAddrMode_p<string opnd_name> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b101,
+ /*REGISTER*/(operand "$"#opnd_name#".reg", 3));
+
+ // 16-bit Displacement
+ let Supplement = (operand "$"#opnd_name#".disp", 16);
+}
+
+class MxEncAddrMode_f<string opnd_name> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b110,
+ /*REGISTER*/(operand "$"#opnd_name#".reg", 3));
+
+ let Supplement = MxEncBriefExt<opnd_name#".index", opnd_name#".disp",
+ /*W/L*/true>.Value;
+}
+
+class MxEncAddrMode_j<string reg_opnd> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b010,
+ /*REGISTER*/(operand "$"#reg_opnd, 3));
+}
+
+class MxEncAddrMode_i<string opnd_name, int size> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b111,
+ /*REGISTER*/0b100);
+
+ // Immediate
+ let Supplement =
+ !cond(
+ !eq(size, 8) : (descend 0b00000000, (operand "$"#opnd_name, 8)),
+ !eq(size, 16) : (operand "$"#opnd_name, 16),
+ !eq(size, 32) : (ascend (slice "$"#opnd_name, 31, 16),
+ (slice "$"#opnd_name, 15, 0))
+ );
+}
+
+// abs.W -> size_w_l = false
+// abs.L -> size_w_l = true
+class MxEncAddrMode_abs<string opnd_name, bit size_w_l = false> : MxEncMemOp {
+ let EA = (descend /*MODE*/0b111,
+ /*REGISTER*/0b00, size_w_l);
+
+ // Absolute address
+ let Supplement = !if(size_w_l,
+ // abs.L
+ (ascend (slice "$"#opnd_name, 31, 16),
+ (slice "$"#opnd_name, 15, 0)),
+ // abs.W
+ (operand "$"#opnd_name, 16)
+ );
+}
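The two supplements above only differ in how many extension words they append: an 8-bit immediate is padded into the low byte of one word, a 16-bit immediate or abs.W address occupies one word, and a 32-bit immediate or abs.L address is split across two words with the high half first. A small sketch of that splitting (values chosen arbitrarily):

#include <cstdint>
#include <cstdio>

// Extension words produced for each operand size.
int main() {
  uint8_t imm8 = 0x7F;
  uint16_t imm16 = 0xBEEF;
  uint32_t imm32 = 0x12345678;
  unsigned w8 = imm8;               // 0x007F: high byte forced to 0b00000000
  unsigned w16 = imm16;             // 0xBEEF: emitted as-is
  unsigned w32_hi = imm32 >> 16;    // 0x1234: slice 31..16 comes first
  unsigned w32_lo = imm32 & 0xFFFF; // 0x5678: slice 15..0 follows
  std::printf("8-bit: %04X  16-bit: %04X  32-bit: %04X %04X\n", w8, w16, w32_hi, w32_lo);
  return 0;
}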
+
// Allows you to specify each bit of opcode
class MxEncOpMode<MxBead b0, MxBead b1 = MxBeadIgnore, MxBead b2 = MxBeadIgnore> {
MxBead B0 = b0;
@@ -332,6 +424,16 @@ def MxEncSize16 : MxEncSize<0b01>;
def MxEncSize32 : MxEncSize<0b10>;
def MxEncSize64 : MxEncSize<0b11>;
+// TODO: Remove "New" in the name after the codebead-based
+// representation is deprecated.
+class MxNewEncSize<bits<2> value> {
+ bits<2> Value = value;
+}
+def MxNewEncSize8 : MxNewEncSize<0b00>;
+def MxNewEncSize16 : MxNewEncSize<0b01>;
+def MxNewEncSize32 : MxNewEncSize<0b10>;
+def MxNewEncSize64 : MxNewEncSize<0b11>;
+
// M68k INSTRUCTION. Most instructions specify the location of an operand by
// using the effective address field in the operation word. The effective address
// is composed of two 3-bit fields: the mode field and the register field. The
@@ -357,6 +459,7 @@ class MxInst<dag outs, dag ins,
// Byte stream
field bits<192> Beads = beads.Value;
+ dag Inst = (ascend);
// Number of bytes
let Size = 0;
diff --git a/llvm/lib/Target/M68k/M68kInstrInfo.td b/llvm/lib/Target/M68k/M68kInstrInfo.td
index c581dd91eaaa..402cba884220 100644
--- a/llvm/lib/Target/M68k/M68kInstrInfo.td
+++ b/llvm/lib/Target/M68k/M68kInstrInfo.td
@@ -291,13 +291,13 @@ def MxARIPD32_TC : MxMemOp<(ops AR32_TC), MxSize32, "e", "printARIPD32Mem", MxA
// extension word. The reference is classified as a data reference with the
// exception of the jump and jump-to-subroutine instructions.
def MxARID : MxOpClass<"ARID">;
-def MxARID8 : MxMemOp<(ops i16imm, AR32), MxSize8, "p", "printARID8Mem", MxARID>;
-def MxARID16 : MxMemOp<(ops i16imm, AR32), MxSize16, "p", "printARID16Mem", MxARID>;
-def MxARID32 : MxMemOp<(ops i16imm, AR32), MxSize32, "p", "printARID32Mem", MxARID>;
+def MxARID8 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize8, "p", "printARID8Mem", MxARID>;
+def MxARID16 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize16, "p", "printARID16Mem", MxARID>;
+def MxARID32 : MxMemOp<(ops i16imm:$disp, AR32:$reg), MxSize32, "p", "printARID32Mem", MxARID>;
-def MxARID8_TC : MxMemOp<(ops i16imm, AR32_TC), MxSize8, "p", "printARID8Mem", MxARID>;
-def MxARID16_TC : MxMemOp<(ops i16imm, AR32_TC), MxSize16, "p", "printARID16Mem", MxARID>;
-def MxARID32_TC : MxMemOp<(ops i16imm, AR32_TC), MxSize32, "p", "printARID32Mem", MxARID>;
+def MxARID8_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize8, "p", "printARID8Mem", MxARID>;
+def MxARID16_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize16, "p", "printARID16Mem", MxARID>;
+def MxARID32_TC : MxMemOp<(ops i16imm:$disp, AR32_TC:$reg), MxSize32, "p", "printARID32Mem", MxARID>;
// ADDRESS REGISTER INDIRECT WITH INDEX. This addressing mode requires one word
// of extension. The address of the operand is the sum of the address in the
@@ -306,13 +306,19 @@ def MxARID32_TC : MxMemOp<(ops i16imm, AR32_TC), MxSize32, "p", "printARID32Me
// The reference is classified as a data reference with the exception of the
// jump and jump-to-subroutine instructions
def MxARII : MxOpClass<"ARII">;
-def MxARII8 : MxMemOp<(ops i8imm, AR32, XR32), MxSize8, "f", "printARII8Mem", MxARII>;
-def MxARII16 : MxMemOp<(ops i8imm, AR32, XR32), MxSize16, "f", "printARII16Mem", MxARII>;
-def MxARII32 : MxMemOp<(ops i8imm, AR32, XR32), MxSize32, "f", "printARII32Mem", MxARII>;
-
-def MxARII8_TC : MxMemOp<(ops i8imm, AR32_TC, XR32_TC), MxSize8, "f", "printARII8Mem", MxARII>;
-def MxARII16_TC : MxMemOp<(ops i8imm, AR32_TC, XR32_TC), MxSize16, "f", "printARII16Mem", MxARII>;
-def MxARII32_TC : MxMemOp<(ops i8imm, AR32_TC, XR32_TC), MxSize32, "f", "printARII32Mem", MxARII>;
+def MxARII8 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
+ MxSize8, "f", "printARII8Mem", MxARII>;
+def MxARII16 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
+ MxSize16, "f", "printARII16Mem", MxARII>;
+def MxARII32 : MxMemOp<(ops i8imm:$disp, AR32:$reg, XR32:$index),
+ MxSize32, "f", "printARII32Mem", MxARII>;
+
+def MxARII8_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
+ MxSize8, "f", "printARII8Mem", MxARII>;
+def MxARII16_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
+ MxSize16, "f", "printARII16Mem", MxARII>;
+def MxARII32_TC : MxMemOp<(ops i8imm:$disp, AR32_TC:$reg, XR32_TC:$index),
+ MxSize32, "f", "printARII32Mem", MxARII>;
// ABSOLUTE SHORT ADDRESS. This addressing mode requires one word of extension.
// The address of the operand is the extension word. The 16-bit address is sign
@@ -360,9 +366,9 @@ def MxPCD32 : MxMemOp<(ops i16imm), MxSize32, "q", "printPCD32Mem", MxPCD>;
// word, and the contents of the index register. The value in the program
// counter is the address of the extension word. This reference is classified as
// a program reference.
-def MxPCI8 : MxMemOp<(ops i8imm, XR32), MxSize8, "k", "printPCI8Mem", MxPCI>;
-def MxPCI16 : MxMemOp<(ops i8imm, XR32), MxSize16, "k", "printPCI16Mem", MxPCI>;
-def MxPCI32 : MxMemOp<(ops i8imm, XR32), MxSize32, "k", "printPCI32Mem", MxPCI>;
+def MxPCI8 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize8, "k", "printPCI8Mem", MxPCI>;
+def MxPCI16 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize16, "k", "printPCI16Mem", MxPCI>;
+def MxPCI32 : MxMemOp<(ops i8imm:$disp, XR32:$index), MxSize32, "k", "printPCI32Mem", MxPCI>;
} // OPERAND_PCREL
def MxImm : AsmOperandClass {
diff --git a/llvm/lib/Target/M68k/M68kRegisterInfo.h b/llvm/lib/Target/M68k/M68kRegisterInfo.h
index 7f822e1cb34f..fc55e19a958b 100644
--- a/llvm/lib/Target/M68k/M68kRegisterInfo.h
+++ b/llvm/lib/Target/M68k/M68kRegisterInfo.h
@@ -97,6 +97,14 @@ public:
bool canRealignStack(const MachineFunction &MF) const override;
Register getFrameRegister(const MachineFunction &MF) const override;
+
+ const TargetRegisterClass *
+ getCrossCopyRegClass(const TargetRegisterClass *RC) const override {
+ if (RC == &M68k::CCRCRegClass)
+ return &M68k::DR32RegClass;
+ return RC;
+ }
+
unsigned getStackRegister() const { return StackPtr; }
unsigned getBaseRegister() const { return BasePtr; }
unsigned getGlobalBaseRegister() const { return GlobalBasePtr; }
diff --git a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
index 9227bd6c3a78..e4ecd3b41824 100644
--- a/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
+++ b/llvm/lib/Target/M68k/MCTargetDesc/M68kMCCodeEmitter.cpp
@@ -39,6 +39,14 @@ class M68kMCCodeEmitter : public MCCodeEmitter {
const MCInstrInfo &MCII;
MCContext &Ctx;
+ void getBinaryCodeForInstr(const MCInst &MI, SmallVectorImpl<MCFixup> &Fixups,
+ APInt &Inst, APInt &Scratch,
+ const MCSubtargetInfo &STI) const;
+
+ void getMachineOpValue(const MCInst &MI, const MCOperand &Op, APInt &Value,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const;
+
public:
M68kMCCodeEmitter(const MCInstrInfo &mcii, MCContext &ctx)
: MCII(mcii), Ctx(ctx) {}
@@ -72,6 +80,28 @@ public:
} // end anonymous namespace
+#include "M68kGenMCCodeEmitter.inc"
+
+void M68kMCCodeEmitter::getMachineOpValue(const MCInst &MI, const MCOperand &Op,
+ APInt &Value,
+ SmallVectorImpl<MCFixup> &Fixups,
+ const MCSubtargetInfo &STI) const {
+ // Register
+ if (Op.isReg()) {
+ unsigned RegNum = Op.getReg();
+ const auto *RI = Ctx.getRegisterInfo();
+ Value |= RI->getEncodingValue(RegNum);
+ // Set up the D/A bit
+ if (M68kII::isAddressRegister(RegNum))
+ Value |= 0b1000;
+ } else if (Op.isImm()) {
+ // Immediate
+ Value |= static_cast<uint64_t>(Op.getImm());
+ } else {
+ llvm_unreachable("Unsupported operand type");
+ }
+}
+
unsigned M68kMCCodeEmitter::encodeBits(unsigned ThisByte, uint8_t Bead,
const MCInst &MI,
const MCInstrDesc &Desc,
@@ -321,6 +351,26 @@ void M68kMCCodeEmitter::encodeInstruction(const MCInst &MI, raw_ostream &OS,
LLVM_DEBUG(dbgs() << "EncodeInstruction: " << MCII.getName(Opcode) << "("
<< Opcode << ")\n");
+ // Try using the new method first.
+ APInt EncodedInst(16, 0U);
+ APInt Scratch(16, 0U);
+ getBinaryCodeForInstr(MI, Fixups, EncodedInst, Scratch, STI);
+ if (EncodedInst.getBitWidth()) {
+ LLVM_DEBUG(dbgs() << "Instruction " << MCII.getName(Opcode) << "(" << Opcode
+ << ") is using the new code emitter\n");
+ ArrayRef<uint64_t> Data(EncodedInst.getRawData(),
+ EncodedInst.getNumWords());
+ int64_t InstSize = EncodedInst.getBitWidth();
+ for (uint64_t Word : Data) {
+ for (int i = 0; i < 4 && InstSize > 0; ++i, InstSize -= 16) {
+ support::endian::write<uint16_t>(OS, static_cast<uint16_t>(Word),
+ support::big);
+ Word >>= 16;
+ }
+ }
+ return;
+ }
+
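The loop above walks the APInt raw words from least to most significant and, inside each word, writes the lowest 16 bits first, each chunk as a big-endian halfword. The standalone sketch below mimics that chunking for an arbitrary 32-bit encoding so the resulting byte order is visible.

#include <cstdint>
#include <cstdio>

// Mimic the new-emitter loop: 16-bit chunks from the low bits up,
// each written big-endian.
int main() {
  uint64_t word = 0xAABBCCDDull; // pretend EncodedInst is 32 bits wide
  int instSize = 32;
  for (int i = 0; i < 4 && instSize > 0; ++i, instSize -= 16) {
    unsigned chunk = static_cast<uint16_t>(word);
    std::printf("%02X %02X ", chunk >> 8, chunk & 0xFF); // big-endian byte pair
    word >>= 16;
  }
  std::printf("\n"); // prints: CC DD AA BB
  return 0;
}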
const uint8_t *Beads = getGenInstrBeads(MI);
if (!Beads || !*Beads) {
llvm_unreachable("*** Instruction does not have Beads defined");
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
index ad003404d793..f3cc7d3fb46f 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.cpp
@@ -1116,14 +1116,6 @@ bool RISCVFrameLowering::restoreCalleeSavedRegisters(
return true;
}
-bool RISCVFrameLowering::enableShrinkWrapping(const MachineFunction &MF) const {
- // Keep the conventional code flow when not optimizing.
- if (MF.getFunction().hasOptNone())
- return false;
-
- return true;
-}
-
bool RISCVFrameLowering::canUseAsPrologue(const MachineBasicBlock &MBB) const {
MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
const MachineFunction *MF = MBB.getParent();
diff --git a/llvm/lib/Target/RISCV/RISCVFrameLowering.h b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
index 1e94e34acf2f..bc3ace786272 100644
--- a/llvm/lib/Target/RISCV/RISCVFrameLowering.h
+++ b/llvm/lib/Target/RISCV/RISCVFrameLowering.h
@@ -65,8 +65,6 @@ public:
bool canUseAsPrologue(const MachineBasicBlock &MBB) const override;
bool canUseAsEpilogue(const MachineBasicBlock &MBB) const override;
- bool enableShrinkWrapping(const MachineFunction &MF) const override;
-
bool isSupportedStackID(TargetStackID::Value ID) const override;
TargetStackID::Value getStackIDForScalableVectors() const override;
diff --git a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
index e9dd14348822..42a29384b057 100644
--- a/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
+++ b/llvm/lib/Target/RISCV/RISCVISelLowering.cpp
@@ -1068,6 +1068,8 @@ RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
setTargetDAGCombine(ISD::AND);
setTargetDAGCombine(ISD::OR);
setTargetDAGCombine(ISD::XOR);
+ setTargetDAGCombine(ISD::ROTL);
+ setTargetDAGCombine(ISD::ROTR);
setTargetDAGCombine(ISD::ANY_EXTEND);
if (Subtarget.hasStdExtF()) {
setTargetDAGCombine(ISD::ZERO_EXTEND);
@@ -7269,6 +7271,40 @@ static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
}
+// Combine
+// ROTR ((GREV x, 24), 16) -> (GREVI x, 8)
+// ROTL ((GREV x, 24), 16) -> (GREVI x, 8)
+// RORW ((GREVW x, 24), 16) -> (GREVIW x, 8)
+// ROLW ((GREVW x, 24), 16) -> (GREVIW x, 8)
+static SDValue combineROTR_ROTL_RORW_ROLW(SDNode *N, SelectionDAG &DAG) {
+ SDValue Src = N->getOperand(0);
+ SDLoc DL(N);
+ unsigned Opc;
+
+ if ((N->getOpcode() == ISD::ROTR || N->getOpcode() == ISD::ROTL) &&
+ Src.getOpcode() == RISCVISD::GREV)
+ Opc = RISCVISD::GREV;
+ else if ((N->getOpcode() == RISCVISD::RORW ||
+ N->getOpcode() == RISCVISD::ROLW) &&
+ Src.getOpcode() == RISCVISD::GREVW)
+ Opc = RISCVISD::GREVW;
+ else
+ return SDValue();
+
+ if (!isa<ConstantSDNode>(N->getOperand(1)) ||
+ !isa<ConstantSDNode>(Src.getOperand(1)))
+ return SDValue();
+
+ unsigned ShAmt1 = N->getConstantOperandVal(1);
+ unsigned ShAmt2 = Src.getConstantOperandVal(1);
+  if (ShAmt1 != 16 || ShAmt2 != 24)
+ return SDValue();
+
+ Src = Src.getOperand(0);
+ return DAG.getNode(Opc, DL, N->getValueType(0), Src,
+ DAG.getConstant(8, DL, N->getOperand(1).getValueType()));
+}
+
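The combine above leans on the identity that a full 32-bit byte reverse (grev by 24) followed by a rotate by 16 equals swapping bytes within each halfword (grev by 8). A quick byte-level check of that identity, with ad hoc helpers that are not part of the patch:

#include <cstdint>
#include <cstdio>

static uint32_t bswap32(uint32_t x) { // grev x, 24 on a 32-bit value
  return (x >> 24) | ((x >> 8) & 0xFF00u) | ((x << 8) & 0xFF0000u) | (x << 24);
}
static uint32_t rot16(uint32_t x) { return (x >> 16) | (x << 16); } // rotr/rotl by 16
static uint32_t grev8(uint32_t x) { // swap bytes within each halfword
  return ((x >> 8) & 0x00FF00FFu) | ((x << 8) & 0xFF00FF00u);
}

int main() {
  uint32_t x = 0x11223344u;
  std::printf("%08X vs %08X\n", rot16(bswap32(x)), grev8(x)); // both 0x22114433
  return 0;
}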
// Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
// non-zero, and to x when it is. Any repeated GREVI stage undoes itself.
// Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stage does
@@ -7973,8 +8009,12 @@ SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
if (SimplifyDemandedLowBitsHelper(0, 32) ||
SimplifyDemandedLowBitsHelper(1, 5))
return SDValue(N, 0);
- break;
+
+ return combineROTR_ROTL_RORW_ROLW(N, DAG);
}
+ case ISD::ROTR:
+ case ISD::ROTL:
+ return combineROTR_ROTL_RORW_ROLW(N, DAG);
case RISCVISD::CLZW:
case RISCVISD::CTZW: {
// Only the lower 32 bits of the first operand are read
diff --git a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
index 8fb78195c1f1..bd7ba322d758 100644
--- a/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInsertVSETVLI.cpp
@@ -334,6 +334,10 @@ public:
return false;
}
+ bool operator!=(const VSETVLIInfo &Other) const {
+ return !(*this == Other);
+ }
+
// Calculate the VSETVLIInfo visible to a block assuming this and Other are
// both predecessors.
VSETVLIInfo intersect(const VSETVLIInfo &Other) const {
@@ -999,12 +1003,6 @@ bool RISCVInsertVSETVLI::needVSETVLIPHI(const VSETVLIInfo &Require,
void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
VSETVLIInfo CurInfo;
- // BBLocalInfo tracks the VL/VTYPE state the same way BBInfo.Change was
- // calculated in computeIncomingVLVTYPE. We need this to apply
- // canSkipVSETVLIForLoadStore the same way computeIncomingVLVTYPE did. We
- // can't include predecessor information in that decision to avoid disagreeing
- // with the global analysis.
- VSETVLIInfo BBLocalInfo;
// Only be set if current VSETVLIInfo is from an explicit VSET(I)VLI.
MachineInstr *PrevVSETVLIMI = nullptr;
@@ -1020,7 +1018,6 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
MI.getOperand(3).setIsDead(false);
MI.getOperand(4).setIsDead(false);
CurInfo = getInfoForVSETVLI(MI);
- BBLocalInfo = getInfoForVSETVLI(MI);
PrevVSETVLIMI = &MI;
continue;
}
@@ -1050,22 +1047,12 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
// use the predecessor information.
assert(BlockInfo[MBB.getNumber()].Pred.isValid() &&
"Expected a valid predecessor state.");
- // Don't use predecessor information if there was an earlier instruction
- // in this block that allowed a vsetvli to be skipped for load/store.
- if (!(BBLocalInfo.isValid() &&
- canSkipVSETVLIForLoadStore(MI, NewInfo, BBLocalInfo)) &&
- needVSETVLI(NewInfo, BlockInfo[MBB.getNumber()].Pred) &&
+ if (needVSETVLI(NewInfo, BlockInfo[MBB.getNumber()].Pred) &&
needVSETVLIPHI(NewInfo, MBB)) {
insertVSETVLI(MBB, MI, NewInfo, BlockInfo[MBB.getNumber()].Pred);
CurInfo = NewInfo;
- BBLocalInfo = NewInfo;
}
-
- // We must update BBLocalInfo for every vector instruction.
- if (!BBLocalInfo.isValid())
- BBLocalInfo = NewInfo;
} else {
- assert(BBLocalInfo.isValid());
// If this instruction isn't compatible with the previous VL/VTYPE
// we need to insert a VSETVLI.
// If this is a unit-stride or strided load/store, we may be able to use
@@ -1101,7 +1088,6 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
if (NeedInsertVSETVLI)
insertVSETVLI(MBB, MI, NewInfo, CurInfo);
CurInfo = NewInfo;
- BBLocalInfo = NewInfo;
}
}
PrevVSETVLIMI = nullptr;
@@ -1112,9 +1098,19 @@ void RISCVInsertVSETVLI::emitVSETVLIs(MachineBasicBlock &MBB) {
if (MI.isCall() || MI.isInlineAsm() || MI.modifiesRegister(RISCV::VL) ||
MI.modifiesRegister(RISCV::VTYPE)) {
CurInfo = VSETVLIInfo::getUnknown();
- BBLocalInfo = VSETVLIInfo::getUnknown();
PrevVSETVLIMI = nullptr;
}
+
+ // If we reach the end of the block and our current info doesn't match the
+ // expected info, insert a vsetvli to correct.
+ if (MI.isTerminator()) {
+ const VSETVLIInfo &ExitInfo = BlockInfo[MBB.getNumber()].Exit;
+ if (CurInfo.isValid() && ExitInfo.isValid() && !ExitInfo.isUnknown() &&
+ CurInfo != ExitInfo) {
+ insertVSETVLI(MBB, MI, ExitInfo, CurInfo);
+ CurInfo = ExitInfo;
+ }
+ }
}
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
index f64717a6d88b..4f39b9c8a982 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfo.cpp
@@ -998,6 +998,8 @@ bool RISCVInstrInfo::isAsCheapAsAMove(const MachineInstr &MI) const {
return (MI.getOperand(1).isReg() &&
MI.getOperand(1).getReg() == RISCV::X0) ||
(MI.getOperand(2).isImm() && MI.getOperand(2).getImm() == 0);
+ case RISCV::LUI:
+ return MI.getOperand(1).getTargetFlags() != RISCVII::MO_HI;
}
return MI.isAsCheapAsAMove();
}
diff --git a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
index 7df3aabe041e..5ccd24b077f3 100644
--- a/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
+++ b/llvm/lib/Target/RISCV/RISCVInstrInfoZb.td
@@ -880,10 +880,6 @@ def : PatGprGpr<riscv_gorcw, GORCW>;
def : PatGprImm<riscv_grevw, GREVIW, uimm5>;
def : PatGprImm<riscv_gorcw, GORCIW, uimm5>;
-// FIXME: Move to DAG combine.
-def : Pat<(riscv_rorw (riscv_grevw GPR:$rs1, 24), 16), (GREVIW GPR:$rs1, 8)>;
-def : Pat<(riscv_rolw (riscv_grevw GPR:$rs1, 24), 16), (GREVIW GPR:$rs1, 8)>;
-
def : PatGprGpr<riscv_shflw, SHFLW>;
def : PatGprGpr<riscv_unshflw, UNSHFLW>;
} // Predicates = [HasStdExtZbp, IsRV64]
@@ -892,10 +888,6 @@ let Predicates = [HasStdExtZbp, IsRV64] in
def : PatGprGpr<int_riscv_xperm_w, XPERM_W>;
let Predicates = [HasStdExtZbp, IsRV32] in {
-// FIXME : Move to DAG combine.
-def : Pat<(i32 (rotr (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
-def : Pat<(i32 (rotl (riscv_grev GPR:$rs1, 24), (i32 16))), (GREVI GPR:$rs1, 8)>;
-
// We treat rev8 as a separate instruction, so match it directly.
def : Pat<(i32 (riscv_grev GPR:$rs1, 24)), (REV8_RV32 GPR:$rs1)>;
} // Predicates = [HasStdExtZbp, IsRV32]
diff --git a/llvm/lib/Target/Sparc/SparcISelLowering.cpp b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
index 6d6879bc94b3..501bb4d4910f 100644
--- a/llvm/lib/Target/Sparc/SparcISelLowering.cpp
+++ b/llvm/lib/Target/Sparc/SparcISelLowering.cpp
@@ -825,9 +825,8 @@ SparcTargetLowering::LowerCall_32(TargetLowering::CallLoweringInfo &CLI,
hasStructRetAttr = true;
// sret only allowed on first argument
assert(Outs[realArgIdx].OrigArgIndex == 0);
- PointerType *Ty = cast<PointerType>(CLI.getArgs()[0].Ty);
- Type *ElementTy = Ty->getPointerElementType();
- SRetArgSize = DAG.getDataLayout().getTypeAllocSize(ElementTy);
+ SRetArgSize =
+ DAG.getDataLayout().getTypeAllocSize(CLI.getArgs()[0].IndirectType);
continue;
}
@@ -2178,8 +2177,10 @@ SparcTargetLowering::LowerF128Op(SDValue Op, SelectionDAG &DAG,
RetPtr = DAG.getFrameIndex(RetFI, PtrVT);
Entry.Node = RetPtr;
Entry.Ty = PointerType::getUnqual(RetTy);
- if (!Subtarget->is64Bit())
+ if (!Subtarget->is64Bit()) {
Entry.IsSRet = true;
+ Entry.IndirectType = RetTy;
+ }
Entry.IsReturned = false;
Args.push_back(Entry);
RetTyABI = Type::getVoidTy(*DAG.getContext());
diff --git a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
index e01adcce04ab..84b337685c14 100644
--- a/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZAsmPrinter.cpp
@@ -126,15 +126,18 @@ static MCInst lowerSubvectorStore(const MachineInstr *MI, unsigned Opcode) {
void SystemZAsmPrinter::emitInstruction(const MachineInstr *MI) {
SystemZMCInstLower Lower(MF->getContext(), *this);
- const SystemZSubtarget *Subtarget = &MF->getSubtarget<SystemZSubtarget>();
MCInst LoweredMI;
switch (MI->getOpcode()) {
case SystemZ::Return:
- if (Subtarget->isTargetXPLINK64())
- LoweredMI =
- MCInstBuilder(SystemZ::B).addReg(SystemZ::R7D).addImm(2).addReg(0);
- else
- LoweredMI = MCInstBuilder(SystemZ::BR).addReg(SystemZ::R14D);
+ LoweredMI = MCInstBuilder(SystemZ::BR)
+ .addReg(SystemZ::R14D);
+ break;
+
+ case SystemZ::Return_XPLINK:
+ LoweredMI = MCInstBuilder(SystemZ::B)
+ .addReg(SystemZ::R7D)
+ .addImm(2)
+ .addReg(0);
break;
case SystemZ::CondReturn:
@@ -144,6 +147,15 @@ void SystemZAsmPrinter::emitInstruction(const MachineInstr *MI) {
.addReg(SystemZ::R14D);
break;
+ case SystemZ::CondReturn_XPLINK:
+ LoweredMI = MCInstBuilder(SystemZ::BC)
+ .addImm(MI->getOperand(0).getImm())
+ .addImm(MI->getOperand(1).getImm())
+ .addReg(SystemZ::R7D)
+ .addImm(2)
+ .addReg(0);
+ break;
+
case SystemZ::CRBReturn:
LoweredMI = MCInstBuilder(SystemZ::CRB)
.addReg(MI->getOperand(0).getReg())
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
index 4b6aa60f5d55..aca1c27f9795 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.cpp
@@ -674,6 +674,7 @@ bool SystemZInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,
bool SystemZInstrInfo::isPredicable(const MachineInstr &MI) const {
unsigned Opcode = MI.getOpcode();
if (Opcode == SystemZ::Return ||
+ Opcode == SystemZ::Return_XPLINK ||
Opcode == SystemZ::Trap ||
Opcode == SystemZ::CallJG ||
Opcode == SystemZ::CallBR)
@@ -731,11 +732,13 @@ bool SystemZInstrInfo::PredicateInstruction(
.addReg(SystemZ::CC, RegState::Implicit);
return true;
}
- if (Opcode == SystemZ::Return) {
- MI.setDesc(get(SystemZ::CondReturn));
+ if (Opcode == SystemZ::Return || Opcode == SystemZ::Return_XPLINK) {
+ MI.setDesc(get(Opcode == SystemZ::Return ? SystemZ::CondReturn
+ : SystemZ::CondReturn_XPLINK));
MachineInstrBuilder(*MI.getParent()->getParent(), MI)
- .addImm(CCValid).addImm(CCMask)
- .addReg(SystemZ::CC, RegState::Implicit);
+ .addImm(CCValid)
+ .addImm(CCMask)
+ .addReg(SystemZ::CC, RegState::Implicit);
return true;
}
if (Opcode == SystemZ::CallJG) {
diff --git a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
index 84f1e0fb428c..c47731b26e95 100644
--- a/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
+++ b/llvm/lib/Target/SystemZ/SystemZInstrInfo.td
@@ -336,13 +336,25 @@ let isCall = 1, isTerminator = 1, isReturn = 1 in {
def CLGIBCall : Alias<6, (outs), (ins GR64:$R1, imm64zx8:$I2, cond4:$M3, ADDR64:$R4), []>;
}
-// A return instruction (br %r14) for ELF and (b 2 %r7) for XPLink.
-let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
- def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
+let Predicates = [IsTargetXPLINK64] in {
+ // A return instruction (b 2(%r7)).
+ let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
+ def Return_XPLINK : Alias<4, (outs), (ins), [(z_retflag)]>;
+
+ // A conditional return instruction (bc <cond>, 2(%r7)).
+ let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, CCMaskFirst = 1, Uses = [CC] in
+ def CondReturn_XPLINK : Alias<4, (outs), (ins cond4:$valid, cond4:$R1), []>;
+}
-// A conditional return instruction (bcr <cond>, %r14).
-let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, CCMaskFirst = 1, Uses = [CC] in
- def CondReturn : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
+let Predicates = [IsTargetELF] in {
+ // A return instruction (br %r14).
+ let isReturn = 1, isTerminator = 1, isBarrier = 1, hasCtrlDep = 1 in
+ def Return : Alias<2, (outs), (ins), [(z_retflag)]>;
+
+ // A conditional return instruction (bcr <cond>, %r14).
+ let isReturn = 1, isTerminator = 1, hasCtrlDep = 1, CCMaskFirst = 1, Uses = [CC] in
+ def CondReturn : Alias<2, (outs), (ins cond4:$valid, cond4:$R1), []>;
+}
// Fused compare and conditional returns.
let isReturn = 1, isTerminator = 1, hasCtrlDep = 1 in {
diff --git a/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td b/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td
index f4777b0097f1..ac9250147015 100644
--- a/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td
+++ b/llvm/lib/Target/SystemZ/SystemZScheduleZ13.td
@@ -172,8 +172,8 @@ def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "(Call)?BAS(R)?(_XPLINK6
def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "TLS_(G|L)DCALL$")>;
// Return
-def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return$")>;
-def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn$")>;
+def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return(_XPLINK)?$")>;
+def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn(_XPLINK)?$")>;
//===----------------------------------------------------------------------===//
// Move instructions
diff --git a/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td b/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td
index f74c0d594482..683b66a6f1ed 100644
--- a/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td
+++ b/llvm/lib/Target/SystemZ/SystemZScheduleZ14.td
@@ -173,8 +173,8 @@ def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "(Call)?BAS(R)?(_XPLINK6
def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "TLS_(G|L)DCALL$")>;
// Return
-def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return$")>;
-def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn$")>;
+def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return(_XPLINK)?$")>;
+def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn(_XPLINK)?$")>;
//===----------------------------------------------------------------------===//
// Move instructions
diff --git a/llvm/lib/Target/SystemZ/SystemZScheduleZ15.td b/llvm/lib/Target/SystemZ/SystemZScheduleZ15.td
index d17e58fc6318..2ebdf508f22b 100644
--- a/llvm/lib/Target/SystemZ/SystemZScheduleZ15.td
+++ b/llvm/lib/Target/SystemZ/SystemZScheduleZ15.td
@@ -173,8 +173,8 @@ def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "(Call)?BAS(R)?(_XPLINK6
def : InstRW<[WLat1, FXa2, FXb, GroupAlone], (instregex "TLS_(G|L)DCALL$")>;
// Return
-def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return$")>;
-def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn$")>;
+def : InstRW<[WLat1, FXb, EndGroup], (instregex "Return(_XPLINK)?$")>;
+def : InstRW<[WLat1, FXb, NormalGr], (instregex "CondReturn(_XPLINK)?$")>;
//===----------------------------------------------------------------------===//
// Move instructions
diff --git a/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td b/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td
index 0f01a4291cf7..51c87c2380c0 100644
--- a/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td
+++ b/llvm/lib/Target/SystemZ/SystemZScheduleZ196.td
@@ -151,8 +151,8 @@ def : InstRW<[WLat1, LSU, FXU2, GroupAlone], (instregex "(Call)?BAS(R)?(_XPLINK6
def : InstRW<[WLat1, LSU, FXU2, GroupAlone], (instregex "TLS_(G|L)DCALL$")>;
// Return
-def : InstRW<[WLat1, LSU, EndGroup], (instregex "Return$")>;
-def : InstRW<[WLat1, LSU, EndGroup], (instregex "CondReturn$")>;
+def : InstRW<[WLat1, LSU, EndGroup], (instregex "Return(_XPLINK)?$")>;
+def : InstRW<[WLat1, LSU, EndGroup], (instregex "CondReturn(_XPLINK)?$")>;
//===----------------------------------------------------------------------===//
// Move instructions
diff --git a/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td b/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td
index 096a95a82ec8..8f2379ce052a 100644
--- a/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td
+++ b/llvm/lib/Target/SystemZ/SystemZScheduleZEC12.td
@@ -156,8 +156,8 @@ def : InstRW<[WLat1, FXU2, LSU, GroupAlone], (instregex "(Call)?BAS(R)?(_XPLINK6
def : InstRW<[WLat1, FXU2, LSU, GroupAlone], (instregex "TLS_(G|L)DCALL$")>;
// Return
-def : InstRW<[WLat1, LSU, EndGroup], (instregex "Return$")>;
-def : InstRW<[WLat1, LSU, NormalGr], (instregex "CondReturn$")>;
+def : InstRW<[WLat1, LSU, EndGroup], (instregex "Return(_XPLINK)?$")>;
+def : InstRW<[WLat1, LSU, NormalGr], (instregex "CondReturn(_XPLINK)?$")>;
//===----------------------------------------------------------------------===//
// Move instructions
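Note on the scheduler updates above: the z196, zEC12, z13, z14 and z15 models now also match the XPLINK return pseudos. The effect of the regular-expression change itself can be checked in isolation with plain std::regex (a standalone sketch; this is not TableGen's instregex matcher):

#include <cassert>
#include <regex>
#include <string>

int main() {
  std::regex Old("Return$"), New("Return(_XPLINK)?$");
  // The old pattern only covers the plain return pseudo; the new one also
  // covers the XPLINK variant.
  assert(std::regex_match(std::string("Return"), Old));
  assert(!std::regex_match(std::string("Return_XPLINK"), Old));
  assert(std::regex_match(std::string("Return"), New));
  assert(std::regex_match(std::string("Return_XPLINK"), New));
}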
diff --git a/llvm/lib/Target/X86/X86ISelLowering.cpp b/llvm/lib/Target/X86/X86ISelLowering.cpp
index 53c00affd70e..b98ac635e00d 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.cpp
+++ b/llvm/lib/Target/X86/X86ISelLowering.cpp
@@ -949,6 +949,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MULHU, MVT::v8i16, Legal);
setOperationAction(ISD::MULHS, MVT::v8i16, Legal);
setOperationAction(ISD::MUL, MVT::v8i16, Legal);
+ setOperationAction(ISD::AVGCEILU, MVT::v16i8, Legal);
+ setOperationAction(ISD::AVGCEILU, MVT::v8i16, Legal);
setOperationAction(ISD::SMULO, MVT::v16i8, Custom);
setOperationAction(ISD::UMULO, MVT::v16i8, Custom);
@@ -1285,13 +1287,10 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
if (VT == MVT::v4i64) continue;
setOperationAction(ISD::ROTL, VT, Custom);
setOperationAction(ISD::ROTR, VT, Custom);
+ setOperationAction(ISD::FSHL, VT, Custom);
+ setOperationAction(ISD::FSHR, VT, Custom);
}
- setOperationAction(ISD::FSHL, MVT::v32i8, Custom);
- setOperationAction(ISD::FSHR, MVT::v32i8, Custom);
- setOperationAction(ISD::FSHL, MVT::v8i32, Custom);
- setOperationAction(ISD::FSHR, MVT::v8i32, Custom);
-
// These types need custom splitting if their input is a 128-bit vector.
setOperationAction(ISD::SIGN_EXTEND, MVT::v8i64, Custom);
setOperationAction(ISD::SIGN_EXTEND, MVT::v16i32, Custom);
@@ -1353,6 +1352,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MULHS, MVT::v16i16, HasInt256 ? Legal : Custom);
setOperationAction(ISD::MULHU, MVT::v32i8, Custom);
setOperationAction(ISD::MULHS, MVT::v32i8, Custom);
+ setOperationAction(ISD::AVGCEILU, MVT::v16i16, HasInt256 ? Legal : Custom);
+ setOperationAction(ISD::AVGCEILU, MVT::v32i8, HasInt256 ? Legal : Custom);
setOperationAction(ISD::SMULO, MVT::v32i8, Custom);
setOperationAction(ISD::UMULO, MVT::v32i8, Custom);
@@ -1652,6 +1653,8 @@ X86TargetLowering::X86TargetLowering(const X86TargetMachine &TM,
setOperationAction(ISD::MULHU, MVT::v32i16, HasBWI ? Legal : Custom);
setOperationAction(ISD::MULHS, MVT::v64i8, Custom);
setOperationAction(ISD::MULHU, MVT::v64i8, Custom);
+ setOperationAction(ISD::AVGCEILU, MVT::v32i16, HasBWI ? Legal : Custom);
+ setOperationAction(ISD::AVGCEILU, MVT::v64i8, HasBWI ? Legal : Custom);
setOperationAction(ISD::SMULO, MVT::v64i8, Custom);
setOperationAction(ISD::UMULO, MVT::v64i8, Custom);
@@ -25700,6 +25703,89 @@ static SDValue getTargetVShiftByConstNode(unsigned Opc, const SDLoc &dl, MVT VT,
DAG.getTargetConstant(ShiftAmt, dl, MVT::i8));
}
+/// Handle vector element shifts by a splat shift amount.
+static SDValue getTargetVShiftNode(unsigned Opc, const SDLoc &dl, MVT VT,
+ SDValue SrcOp, SDValue ShAmt, int ShAmtIdx,
+ const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ MVT AmtVT = ShAmt.getSimpleValueType();
+ assert(AmtVT.isVector() && "Vector shift type mismatch");
+ assert(0 <= ShAmtIdx && ShAmtIdx < (int)AmtVT.getVectorNumElements() &&
+ "Illegal vector splat index");
+
+ // Move the splat element to the bottom element.
+ if (ShAmtIdx != 0) {
+ SmallVector<int> Mask(AmtVT.getVectorNumElements(), -1);
+ Mask[0] = ShAmtIdx;
+ ShAmt = DAG.getVectorShuffle(AmtVT, dl, ShAmt, DAG.getUNDEF(AmtVT), Mask);
+ }
+
+ // See if we can mask off the upper elements using the existing source node.
+ // The shift uses the entire lower 64-bits of the amount vector, so no need to
+ // do this for vXi64 types.
+ bool IsMasked = false;
+ if (AmtVT.getScalarSizeInBits() < 64) {
+ if (ShAmt.getOpcode() == ISD::BUILD_VECTOR ||
+ ShAmt.getOpcode() == ISD::SCALAR_TO_VECTOR) {
+ // If the shift amount has come from a scalar, then zero-extend the scalar
+ // before moving to the vector.
+ ShAmt = DAG.getZExtOrTrunc(ShAmt.getOperand(0), dl, MVT::i32);
+ ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
+ ShAmt = DAG.getNode(X86ISD::VZEXT_MOVL, dl, MVT::v4i32, ShAmt);
+ AmtVT = MVT::v4i32;
+ IsMasked = true;
+ } else if (ShAmt.getOpcode() == ISD::AND) {
+      // If the shift amount is already masked (e.g. for rotation modulo), we
+      // can zero-extend it by setting all the other mask elements to
+      // zero.
+ SmallVector<SDValue> MaskElts(
+ AmtVT.getVectorNumElements(),
+ DAG.getConstant(0, dl, AmtVT.getScalarType()));
+ MaskElts[0] = DAG.getAllOnesConstant(dl, AmtVT.getScalarType());
+ SDValue Mask = DAG.getBuildVector(AmtVT, dl, MaskElts);
+ if ((Mask = DAG.FoldConstantArithmetic(ISD::AND, dl, AmtVT,
+ {ShAmt.getOperand(1), Mask}))) {
+ ShAmt = DAG.getNode(ISD::AND, dl, AmtVT, ShAmt.getOperand(0), Mask);
+ IsMasked = true;
+ }
+ }
+ }
+
+ // Extract if the shift amount vector is larger than 128-bits.
+ if (AmtVT.getSizeInBits() > 128) {
+ ShAmt = extract128BitVector(ShAmt, 0, DAG, dl);
+ AmtVT = ShAmt.getSimpleValueType();
+ }
+
+ // Zero-extend bottom element to v2i64 vector type, either by extension or
+ // shuffle masking.
+ if (!IsMasked && AmtVT.getScalarSizeInBits() < 64) {
+ if (Subtarget.hasSSE41())
+ ShAmt = DAG.getNode(ISD::ZERO_EXTEND_VECTOR_INREG, SDLoc(ShAmt),
+ MVT::v2i64, ShAmt);
+ else {
+ SDValue ByteShift = DAG.getTargetConstant(
+ (128 - AmtVT.getScalarSizeInBits()) / 8, SDLoc(ShAmt), MVT::i8);
+ ShAmt = DAG.getBitcast(MVT::v16i8, ShAmt);
+ ShAmt = DAG.getNode(X86ISD::VSHLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
+ ByteShift);
+ ShAmt = DAG.getNode(X86ISD::VSRLDQ, SDLoc(ShAmt), MVT::v16i8, ShAmt,
+ ByteShift);
+ }
+ }
+
+ // Change opcode to non-immediate version.
+ Opc = getTargetVShiftUniformOpcode(Opc, true);
+
+ // The return type has to be a 128-bit type with the same element
+ // type as the input type.
+ MVT EltVT = VT.getVectorElementType();
+ MVT ShVT = MVT::getVectorVT(EltVT, 128 / EltVT.getSizeInBits());
+
+ ShAmt = DAG.getBitcast(ShVT, ShAmt);
+ return DAG.getNode(Opc, dl, VT, SrcOp, ShAmt);
+}
+
/// Handle vector element shifts where the shift amount may or may not be a
/// constant. Takes immediate version of shift as input.
/// TODO: Replace with vector + (splat) idx to avoid extract_element nodes.
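The new getTargetVShiftNode above addresses exactly that TODO: callers now pass the splat source vector together with the splat lane index, and the helper shuffles that lane down to element 0, zeroes the remaining lanes, and issues a single uniform (non-immediate) shift. A standalone C++ sketch of the contract it implements, with hypothetical scalar types and names in place of SDValues (shift amounts kept small so the sketch stays well-defined):

#include <array>
#include <cassert>
#include <cstdint>

// Shift every element of Src left by the amount held in lane ShAmtIdx of Amt;
// all other lanes of Amt are ignored, mirroring the shuffle-to-lane-0 step.
template <size_t N>
std::array<uint32_t, N> shiftBySplatLane(std::array<uint32_t, N> Src,
                                         const std::array<uint32_t, N> &Amt,
                                         size_t ShAmtIdx) {
  uint32_t A = Amt[ShAmtIdx];
  for (uint32_t &E : Src)
    E <<= A;
  return Src;
}

int main() {
  std::array<uint32_t, 4> Src{1, 2, 3, 4}, Amt{9, 9, 3, 9};
  auto R = shiftBySplatLane(Src, Amt, 2); // every lane shifted by Amt[2] == 3
  assert(R[0] == 8 && R[1] == 16 && R[2] == 24 && R[3] == 32);
}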
@@ -26444,6 +26530,8 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
case VSHIFT: {
SDValue SrcOp = Op.getOperand(1);
SDValue ShAmt = Op.getOperand(2);
+ assert(ShAmt.getValueType() == MVT::i32 &&
+ "Unexpected VSHIFT amount type");
// Catch shift-by-constant.
if (auto *CShAmt = dyn_cast<ConstantSDNode>(ShAmt))
@@ -26451,8 +26539,9 @@ SDValue X86TargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
Op.getSimpleValueType(), SrcOp,
CShAmt->getZExtValue(), DAG);
+ ShAmt = DAG.getNode(ISD::SCALAR_TO_VECTOR, dl, MVT::v4i32, ShAmt);
return getTargetVShiftNode(IntrData->Opc0, dl, Op.getSimpleValueType(),
- SrcOp, ShAmt, Subtarget, DAG);
+ SrcOp, ShAmt, 0, Subtarget, DAG);
}
case COMPRESS_EXPAND_IN_REG: {
SDValue Mask = Op.getOperand(3);
@@ -28394,6 +28483,21 @@ static SDValue LowerABS(SDValue Op, const X86Subtarget &Subtarget,
return SDValue();
}
+static SDValue LowerAVG(SDValue Op, const X86Subtarget &Subtarget,
+ SelectionDAG &DAG) {
+ MVT VT = Op.getSimpleValueType();
+
+ // For AVX1 cases, split to use legal ops (everything but v4i64).
+ if (VT.is256BitVector() && !Subtarget.hasInt256())
+ return splitVectorIntBinary(Op, DAG);
+
+ if (VT == MVT::v32i16 || VT == MVT::v64i8)
+ return splitVectorIntBinary(Op, DAG);
+
+ // Default to expand.
+ return SDValue();
+}
+
static SDValue LowerMINMAX(SDValue Op, SelectionDAG &DAG) {
MVT VT = Op.getSimpleValueType();
@@ -29843,8 +29947,8 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
{Op0, Op1, Amt}, DAG, Subtarget);
}
assert((VT == MVT::v16i8 || VT == MVT::v32i8 || VT == MVT::v64i8 ||
- VT == MVT::v8i16 || VT == MVT::v4i32 || VT == MVT::v8i32 ||
- VT == MVT::v16i32) &&
+ VT == MVT::v8i16 || VT == MVT::v16i16 || VT == MVT::v4i32 ||
+ VT == MVT::v8i32 || VT == MVT::v16i32) &&
"Unexpected funnel shift type!");
// fshl(x,y,z) -> unpack(y,x) << (z & (bw-1))) >> bw.
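For reference, the unpack-and-shift identity quoted in the comment above can be checked directly in scalar C++ for 8-bit lanes (an illustrative sketch, independent of the DAG code):

#include <cassert>
#include <cstdint>

// fshl(x, y, z) on an 8-bit lane: shift x left by z (mod 8), filling from y.
static uint8_t fshl8_ref(uint8_t X, uint8_t Y, unsigned Z) {
  Z &= 7;
  return (uint8_t)((X << Z) | (Z ? (Y >> (8 - Z)) : 0));
}

// The lowering's view: concatenate the pair as unpack(y, x) into one wider
// lane, shift the wide lane, and take the high half back out.
static uint8_t fshl8_via_unpack(uint8_t X, uint8_t Y, unsigned Z) {
  uint16_t Wide = (uint16_t)((X << 8) | Y);
  return (uint8_t)((Wide << (Z & 7)) >> 8);
}

int main() {
  for (unsigned X = 0; X < 256; ++X)
    for (unsigned Y = 0; Y < 256; ++Y)
      for (unsigned Z = 0; Z < 16; ++Z)
        assert(fshl8_ref((uint8_t)X, (uint8_t)Y, Z) ==
               fshl8_via_unpack((uint8_t)X, (uint8_t)Y, Z));
}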
@@ -29867,7 +29971,7 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
// Split 256-bit integers on XOP/pre-AVX2 targets.
// Split 512-bit integers on non 512-bit BWI targets.
- if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 32) ||
+ if ((VT.is256BitVector() && ((Subtarget.hasXOP() && EltSizeInBits < 16) ||
!Subtarget.hasAVX2())) ||
(VT.is512BitVector() && !Subtarget.useBWIRegs() &&
EltSizeInBits < 32)) {
@@ -29878,18 +29982,18 @@ static SDValue LowerFunnelShift(SDValue Op, const X86Subtarget &Subtarget,
// Attempt to fold scalar shift as unpack(y,x) << zext(splat(z))
if (supportedVectorShiftWithBaseAmnt(ExtVT, Subtarget, ShiftOpc)) {
- if (SDValue ScalarAmt = DAG.getSplatValue(AmtMod)) {
+ int ScalarAmtIdx = -1;
+ if (SDValue ScalarAmt = DAG.getSplatSourceVector(AmtMod, ScalarAmtIdx)) {
// Uniform vXi16 funnel shifts can be efficiently handled by default.
if (EltSizeInBits == 16)
return SDValue();
SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, Op1, Op0));
SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, Op1, Op0));
- ScalarAmt = DAG.getZExtOrTrunc(ScalarAmt, DL, MVT::i32);
- Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt, Subtarget,
- DAG);
- Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt, Subtarget,
- DAG);
+ Lo = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Lo, ScalarAmt,
+ ScalarAmtIdx, Subtarget, DAG);
+ Hi = getTargetVShiftNode(ShiftOpc, DL, ExtVT, Hi, ScalarAmt,
+ ScalarAmtIdx, Subtarget, DAG);
return getPack(DAG, Subtarget, DL, VT, Lo, Hi, !IsFSHR);
}
}
@@ -30082,15 +30186,15 @@ static SDValue LowerRotate(SDValue Op, const X86Subtarget &Subtarget,
// TODO: Handle vXi16 cases on all targets.
if (EltSizeInBits == 8 || EltSizeInBits == 32 ||
(IsROTL && EltSizeInBits == 16 && !Subtarget.hasAVX())) {
- if (SDValue BaseRotAmt = DAG.getSplatValue(AmtMod)) {
+ int BaseRotAmtIdx = -1;
+ if (SDValue BaseRotAmt = DAG.getSplatSourceVector(AmtMod, BaseRotAmtIdx)) {
unsigned ShiftX86Opc = IsROTL ? X86ISD::VSHLI : X86ISD::VSRLI;
SDValue Lo = DAG.getBitcast(ExtVT, getUnpackl(DAG, DL, VT, R, R));
SDValue Hi = DAG.getBitcast(ExtVT, getUnpackh(DAG, DL, VT, R, R));
- BaseRotAmt = DAG.getZExtOrTrunc(BaseRotAmt, DL, MVT::i32);
Lo = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Lo, BaseRotAmt,
- Subtarget, DAG);
+ BaseRotAmtIdx, Subtarget, DAG);
Hi = getTargetVShiftNode(ShiftX86Opc, DL, ExtVT, Hi, BaseRotAmt,
- Subtarget, DAG);
+ BaseRotAmtIdx, Subtarget, DAG);
return getPack(DAG, Subtarget, DL, VT, Lo, Hi, IsROTL);
}
}
@@ -31712,6 +31816,7 @@ SDValue X86TargetLowering::LowerOperation(SDValue Op, SelectionDAG &DAG) const {
case ISD::UMAX:
case ISD::UMIN: return LowerMINMAX(Op, DAG);
case ISD::ABS: return LowerABS(Op, Subtarget, DAG);
+ case ISD::AVGCEILU: return LowerAVG(Op, Subtarget, DAG);
case ISD::FSINCOS: return LowerFSINCOS(Op, Subtarget, DAG);
case ISD::MLOAD: return LowerMLOAD(Op, Subtarget, DAG);
case ISD::MSTORE: return LowerMSTORE(Op, Subtarget, DAG);
@@ -31807,9 +31912,8 @@ void X86TargetLowering::ReplaceNodeResults(SDNode *N,
Results.push_back(Res);
return;
}
- case X86ISD::VPMADDWD:
- case X86ISD::AVG: {
- // Legalize types for X86ISD::AVG/VPMADDWD by widening.
+ case X86ISD::VPMADDWD: {
+ // Legalize types for X86ISD::VPMADDWD by widening.
assert(Subtarget.hasSSE2() && "Requires at least SSE2!");
EVT VT = N->getValueType(0);
@@ -33041,7 +33145,6 @@ const char *X86TargetLowering::getTargetNodeName(unsigned Opcode) const {
NODE_NAME_CASE(SCALEF_RND)
NODE_NAME_CASE(SCALEFS)
NODE_NAME_CASE(SCALEFS_RND)
- NODE_NAME_CASE(AVG)
NODE_NAME_CASE(MULHRS)
NODE_NAME_CASE(SINT_TO_FP_RND)
NODE_NAME_CASE(UINT_TO_FP_RND)
@@ -33222,7 +33325,6 @@ bool X86TargetLowering::isBinOp(unsigned Opcode) const {
bool X86TargetLowering::isCommutativeBinOp(unsigned Opcode) const {
switch (Opcode) {
// TODO: Add more X86ISD opcodes once we have test coverage.
- case X86ISD::AVG:
case X86ISD::PCMPEQ:
case X86ISD::PMULDQ:
case X86ISD::PMULUDQ:
@@ -40632,7 +40734,6 @@ bool X86TargetLowering::SimplifyDemandedVectorEltsForTargetNode(
case X86ISD::UNPCKH:
case X86ISD::BLENDI:
// Integer ops.
- case X86ISD::AVG:
case X86ISD::PACKSS:
case X86ISD::PACKUS:
// Horizontal Ops.
@@ -43123,6 +43224,104 @@ static SDValue combineExtractVectorElt(SDNode *N, SelectionDAG &DAG,
return SDValue();
}
+// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
+// This is more or less the reverse of combineBitcastvxi1.
+static SDValue combineToExtendBoolVectorInReg(
+ unsigned Opcode, const SDLoc &DL, EVT VT, SDValue N0, SelectionDAG &DAG,
+ TargetLowering::DAGCombinerInfo &DCI, const X86Subtarget &Subtarget) {
+ if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
+ Opcode != ISD::ANY_EXTEND)
+ return SDValue();
+ if (!DCI.isBeforeLegalizeOps())
+ return SDValue();
+ if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
+ return SDValue();
+
+ EVT SVT = VT.getScalarType();
+ EVT InSVT = N0.getValueType().getScalarType();
+ unsigned EltSizeInBits = SVT.getSizeInBits();
+
+ // Input type must be extending a bool vector (bit-casted from a scalar
+ // integer) to legal integer types.
+ if (!VT.isVector())
+ return SDValue();
+ if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
+ return SDValue();
+ if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
+ return SDValue();
+
+ SDValue N00 = N0.getOperand(0);
+ EVT SclVT = N00.getValueType();
+ if (!SclVT.isScalarInteger())
+ return SDValue();
+
+ SDValue Vec;
+ SmallVector<int> ShuffleMask;
+ unsigned NumElts = VT.getVectorNumElements();
+ assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
+
+ // Broadcast the scalar integer to the vector elements.
+ if (NumElts > EltSizeInBits) {
+ // If the scalar integer is greater than the vector element size, then we
+ // must split it down into sub-sections for broadcasting. For example:
+ // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
+ // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
+ assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
+ unsigned Scale = NumElts / EltSizeInBits;
+ EVT BroadcastVT = EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
+ Vec = DAG.getBitcast(VT, Vec);
+
+ for (unsigned i = 0; i != Scale; ++i)
+ ShuffleMask.append(EltSizeInBits, i);
+ Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
+ } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
+ (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
+ // If we have register broadcast instructions, use the scalar size as the
+ // element type for the shuffle. Then cast to the wider element type. The
+ // widened bits won't be used, and this might allow the use of a broadcast
+ // load.
+ assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
+ unsigned Scale = EltSizeInBits / NumElts;
+ EVT BroadcastVT =
+ EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
+ ShuffleMask.append(NumElts * Scale, 0);
+ Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
+ Vec = DAG.getBitcast(VT, Vec);
+ } else {
+ // For smaller scalar integers, we can simply any-extend it to the vector
+ // element size (we don't care about the upper bits) and broadcast it to all
+ // elements.
+ SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
+ Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
+ ShuffleMask.append(NumElts, 0);
+ Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
+ }
+
+ // Now, mask the relevant bit in each element.
+ SmallVector<SDValue, 32> Bits;
+ for (unsigned i = 0; i != NumElts; ++i) {
+ int BitIdx = (i % EltSizeInBits);
+ APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
+ Bits.push_back(DAG.getConstant(Bit, DL, SVT));
+ }
+ SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
+ Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
+
+ // Compare against the bitmask and extend the result.
+ EVT CCVT = VT.changeVectorElementType(MVT::i1);
+ Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
+ Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
+
+ // For SEXT, this is now done, otherwise shift the result down for
+ // zero-extension.
+ if (Opcode == ISD::SIGN_EXTEND)
+ return Vec;
+ return DAG.getNode(ISD::SRL, DL, VT, Vec,
+ DAG.getConstant(EltSizeInBits - 1, DL, VT));
+}
+
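In scalar terms, the helper above lowers sext(v8i1 bitcast(i8 M)) by broadcasting M to every lane, AND-ing each lane with the single bit it represents, and sign-extending the equality comparison against that bit mask; zero-extension then shifts the all-ones lanes right by EltSizeInBits - 1. A standalone C++ model of the per-lane computation (illustrative only, not LLVM API):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned M = 0; M < 256; ++M) {     // the bitcast i8 source
    for (unsigned I = 0; I < 8; ++I) {     // lane index
      uint32_t Broadcast = M;              // splat the scalar to every lane
      uint32_t Bit = 1u << I;              // this lane's single-bit mask
      uint32_t Masked = Broadcast & Bit;   // keep only the lane's own bit
      int32_t SExt = (Masked == Bit) ? -1 : 0;  // setcc eq, sign-extended
      uint32_t ZExt = (uint32_t)SExt >> 31;     // the SRL step used for zext
      assert(SExt == (((M >> I) & 1) ? -1 : 0));
      assert(ZExt == ((M >> I) & 1));
    }
  }
}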
/// If a vector select has an operand that is -1 or 0, try to simplify the
/// select to a bitwise logic operation.
/// TODO: Move to DAGCombiner, possibly using TargetLowering::hasAndNot()?
@@ -43340,19 +43539,17 @@ static SDValue combineSelectOfTwoConstants(SDNode *N, SelectionDAG &DAG) {
/// This function will also call SimplifyDemandedBits on already created
/// BLENDV to perform additional simplifications.
static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget &Subtarget) {
+ TargetLowering::DAGCombinerInfo &DCI,
+ const X86Subtarget &Subtarget) {
SDValue Cond = N->getOperand(0);
if ((N->getOpcode() != ISD::VSELECT &&
N->getOpcode() != X86ISD::BLENDV) ||
ISD::isBuildVectorOfConstantSDNodes(Cond.getNode()))
return SDValue();
- // Don't optimize before the condition has been transformed to a legal type
- // and don't ever optimize vector selects that map to AVX512 mask-registers.
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
unsigned BitWidth = Cond.getScalarValueSizeInBits();
- if (BitWidth < 8 || BitWidth > 64)
- return SDValue();
+ EVT VT = N->getValueType(0);
// We can only handle the cases where VSELECT is directly legal on the
// subtarget. We custom lower VSELECT nodes with constant conditions and
@@ -43364,8 +43561,6 @@ static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
// Potentially, we should combine constant-condition vselect nodes
// pre-legalization into shuffles and not mark as many types as custom
// lowered.
- const TargetLowering &TLI = DAG.getTargetLoweringInfo();
- EVT VT = N->getValueType(0);
if (!TLI.isOperationLegalOrCustom(ISD::VSELECT, VT))
return SDValue();
// FIXME: We don't support i16-element blends currently. We could and
@@ -43383,6 +43578,22 @@ static SDValue combineVSelectToBLENDV(SDNode *N, SelectionDAG &DAG,
if (VT.is512BitVector())
return SDValue();
+ // PreAVX512, without mask-registers, attempt to sign-extend bool vectors to
+ // allow us to use BLENDV.
+ if (!Subtarget.hasAVX512() && BitWidth == 1) {
+ EVT CondVT = VT.changeVectorElementTypeToInteger();
+ if (SDValue ExtCond = combineToExtendBoolVectorInReg(
+ ISD::SIGN_EXTEND, SDLoc(N), CondVT, Cond, DAG, DCI, Subtarget)) {
+ return DAG.getNode(X86ISD::BLENDV, SDLoc(N), VT, ExtCond,
+ N->getOperand(1), N->getOperand(2));
+ }
+ }
+
+ // Don't optimize before the condition has been transformed to a legal type
+ // and don't ever optimize vector selects that map to AVX512 mask-registers.
+ if (BitWidth < 8 || BitWidth > 64)
+ return SDValue();
+
auto OnlyUsedAsSelectCond = [](SDValue Cond) {
for (SDNode::use_iterator UI = Cond->use_begin(), UE = Cond->use_end();
UI != UE; ++UI)
@@ -46876,30 +47087,44 @@ static SDValue combineAnd(SDNode *N, SelectionDAG &DAG,
// If either operand is a constant mask, then only the elements that aren't
// zero are actually demanded by the other operand.
- auto SimplifyUndemandedElts = [&](SDValue Op, SDValue OtherOp) {
+ auto GetDemandedMasks = [&](SDValue Op) {
APInt UndefElts;
SmallVector<APInt> EltBits;
int NumElts = VT.getVectorNumElements();
int EltSizeInBits = VT.getScalarSizeInBits();
- if (!getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts, EltBits))
- return false;
-
- APInt DemandedBits = APInt::getZero(EltSizeInBits);
- APInt DemandedElts = APInt::getZero(NumElts);
- for (int I = 0; I != NumElts; ++I)
- if (!EltBits[I].isZero()) {
- DemandedBits |= EltBits[I];
- DemandedElts.setBit(I);
- }
-
- return TLI.SimplifyDemandedVectorElts(OtherOp, DemandedElts, DCI) ||
- TLI.SimplifyDemandedBits(OtherOp, DemandedBits, DemandedElts, DCI);
+ APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
+ APInt DemandedElts = APInt::getAllOnes(NumElts);
+ if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
+ EltBits)) {
+ DemandedBits.clearAllBits();
+ DemandedElts.clearAllBits();
+ for (int I = 0; I != NumElts; ++I)
+ if (!EltBits[I].isZero()) {
+ DemandedBits |= EltBits[I];
+ DemandedElts.setBit(I);
+ }
+ }
+ return std::make_pair(DemandedBits, DemandedElts);
};
- if (SimplifyUndemandedElts(N0, N1) || SimplifyUndemandedElts(N1, N0)) {
+ std::pair<APInt, APInt> Demand0 = GetDemandedMasks(N1);
+ std::pair<APInt, APInt> Demand1 = GetDemandedMasks(N0);
+
+ if (TLI.SimplifyDemandedVectorElts(N0, Demand0.second, DCI) ||
+ TLI.SimplifyDemandedVectorElts(N1, Demand1.second, DCI) ||
+ TLI.SimplifyDemandedBits(N0, Demand0.first, Demand0.second, DCI) ||
+ TLI.SimplifyDemandedBits(N1, Demand1.first, Demand1.second, DCI)) {
if (N->getOpcode() != ISD::DELETED_NODE)
DCI.AddToWorklist(N);
return SDValue(N, 0);
}
+
+ SDValue NewN0 = TLI.SimplifyMultipleUseDemandedBits(N0, Demand0.first,
+ Demand0.second, DAG);
+ SDValue NewN1 = TLI.SimplifyMultipleUseDemandedBits(N1, Demand1.first,
+ Demand1.second, DAG);
+ if (NewN0 || NewN1)
+ return DAG.getNode(ISD::AND, dl, VT, NewN0 ? NewN0 : N0,
+ NewN1 ? NewN1 : N1);
}
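The rewrite above generalizes the old one-directional simplification: for each AND operand it computes the bits and elements the other side actually demands (everything, unless the other side is a constant mask), then both tries to simplify the operands in place and, via SimplifyMultipleUseDemandedBits, to substitute cheaper equivalents. The soundness argument is simply that bits outside a constant mask can never reach the result, which a scalar C++ check makes concrete (hypothetical values, illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const uint32_t M = 0x00FF00F0u;               // constant mask operand
  for (uint32_t X = 0; X < (1u << 16); ++X) {
    uint32_t Orig = X * 2654435761u;            // arbitrary other operand
    uint32_t Junk = ~M & (Orig ^ 0xA5A5A5A5u);  // perturb only undemanded bits
    uint32_t Replaced = (Orig & M) | Junk;      // agrees with Orig on M's bits
    assert((Orig & M) == (Replaced & M));       // the AND result is unchanged
  }
}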
// Attempt to combine a scalar bitmask AND with an extracted shuffle.
@@ -47679,7 +47904,7 @@ static SDValue combineTruncateWithSat(SDValue In, EVT VT, const SDLoc &DL,
/// This function detects the AVG pattern between vectors of unsigned i8/i16,
/// which is c = (a + b + 1) / 2, and replace this operation with the efficient
-/// X86ISD::AVG instruction.
+/// ISD::AVGCEILU (AVG) instruction.
static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
const X86Subtarget &Subtarget,
const SDLoc &DL) {
@@ -47742,7 +47967,7 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG,
auto AVGBuilder = [](SelectionDAG &DAG, const SDLoc &DL,
ArrayRef<SDValue> Ops) {
- return DAG.getNode(X86ISD::AVG, DL, Ops[0].getValueType(), Ops);
+ return DAG.getNode(ISD::AVGCEILU, DL, Ops[0].getValueType(), Ops);
};
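detectAVGPattern is unchanged in what it matches, the averaging of unsigned i8/i16 elements computed as c = (a + b + 1) / 2 in a wider type; only the node it builds switches from the removed X86ISD::AVG to the generic ISD::AVGCEILU. That the matched pattern really is the rounding average PAVGB/PAVGW implement can be checked exhaustively for the i8 case (a standalone C++ sketch):

#include <cassert>
#include <cstdint>

int main() {
  for (unsigned A = 0; A < 256; ++A)
    for (unsigned B = 0; B < 256; ++B) {
      // The matched IR: add in a wider type, add 1, shift right by one.
      uint8_t Widened = (uint8_t)(((uint16_t)A + (uint16_t)B + 1) >> 1);
      // An overflow-free formulation of the same ceiling average.
      uint8_t AvgCeil = (uint8_t)((A | B) - ((A ^ B) >> 1));
      assert(Widened == AvgCeil);
    }
}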
auto AVGSplitter = [&](std::array<SDValue, 2> Ops) {
@@ -50113,26 +50338,62 @@ static SDValue combineCVTP2I_CVTTP2I(SDNode *N, SelectionDAG &DAG,
static SDValue combineAndnp(SDNode *N, SelectionDAG &DAG,
TargetLowering::DAGCombinerInfo &DCI,
const X86Subtarget &Subtarget) {
+ SDValue N0 = N->getOperand(0);
+ SDValue N1 = N->getOperand(1);
MVT VT = N->getSimpleValueType(0);
// ANDNP(0, x) -> x
- if (ISD::isBuildVectorAllZeros(N->getOperand(0).getNode()))
- return N->getOperand(1);
+ if (ISD::isBuildVectorAllZeros(N0.getNode()))
+ return N1;
// ANDNP(x, 0) -> 0
- if (ISD::isBuildVectorAllZeros(N->getOperand(1).getNode()))
+ if (ISD::isBuildVectorAllZeros(N1.getNode()))
return DAG.getConstant(0, SDLoc(N), VT);
// Turn ANDNP back to AND if input is inverted.
- if (SDValue Not = IsNOT(N->getOperand(0), DAG))
- return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not),
- N->getOperand(1));
+ if (SDValue Not = IsNOT(N0, DAG))
+ return DAG.getNode(ISD::AND, SDLoc(N), VT, DAG.getBitcast(VT, Not), N1);
// Attempt to recursively combine a bitmask ANDNP with shuffles.
if (VT.isVector() && (VT.getScalarSizeInBits() % 8) == 0) {
SDValue Op(N, 0);
if (SDValue Res = combineX86ShufflesRecursively(Op, DAG, Subtarget))
return Res;
+
+ // If either operand is a constant mask, then only the elements that aren't
+ // zero are actually demanded by the other operand.
+ auto GetDemandedMasks = [&](SDValue Op, bool Invert = false) {
+ APInt UndefElts;
+ SmallVector<APInt> EltBits;
+ int NumElts = VT.getVectorNumElements();
+ int EltSizeInBits = VT.getScalarSizeInBits();
+ APInt DemandedBits = APInt::getAllOnes(EltSizeInBits);
+ APInt DemandedElts = APInt::getAllOnes(NumElts);
+ if (getTargetConstantBitsFromNode(Op, EltSizeInBits, UndefElts,
+ EltBits)) {
+ DemandedBits.clearAllBits();
+ DemandedElts.clearAllBits();
+ for (int I = 0; I != NumElts; ++I)
+ if ((Invert && !EltBits[I].isAllOnes()) ||
+ (!Invert && !EltBits[I].isZero())) {
+ DemandedBits |= Invert ? ~EltBits[I] : EltBits[I];
+ DemandedElts.setBit(I);
+ }
+ }
+ return std::make_pair(DemandedBits, DemandedElts);
+ };
+ std::pair<APInt, APInt> Demand0 = GetDemandedMasks(N1);
+ std::pair<APInt, APInt> Demand1 = GetDemandedMasks(N0, true);
+
+ const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+ if (TLI.SimplifyDemandedVectorElts(N0, Demand0.second, DCI) ||
+ TLI.SimplifyDemandedVectorElts(N1, Demand1.second, DCI) ||
+ TLI.SimplifyDemandedBits(N0, Demand0.first, Demand0.second, DCI) ||
+ TLI.SimplifyDemandedBits(N1, Demand1.first, Demand1.second, DCI)) {
+ if (N->getOpcode() != ISD::DELETED_NODE)
+ DCI.AddToWorklist(N);
+ return SDValue(N, 0);
+ }
}
return SDValue();
@@ -50420,110 +50681,6 @@ static SDValue combineToExtendCMOV(SDNode *Extend, SelectionDAG &DAG) {
return Res;
}
-// Convert (vXiY *ext(vXi1 bitcast(iX))) to extend_in_reg(broadcast(iX)).
-// This is more or less the reverse of combineBitcastvxi1.
-static SDValue
-combineToExtendBoolVectorInReg(SDNode *N, SelectionDAG &DAG,
- TargetLowering::DAGCombinerInfo &DCI,
- const X86Subtarget &Subtarget) {
- unsigned Opcode = N->getOpcode();
- if (Opcode != ISD::SIGN_EXTEND && Opcode != ISD::ZERO_EXTEND &&
- Opcode != ISD::ANY_EXTEND)
- return SDValue();
- if (!DCI.isBeforeLegalizeOps())
- return SDValue();
- if (!Subtarget.hasSSE2() || Subtarget.hasAVX512())
- return SDValue();
-
- SDValue N0 = N->getOperand(0);
- EVT VT = N->getValueType(0);
- EVT SVT = VT.getScalarType();
- EVT InSVT = N0.getValueType().getScalarType();
- unsigned EltSizeInBits = SVT.getSizeInBits();
-
- // Input type must be extending a bool vector (bit-casted from a scalar
- // integer) to legal integer types.
- if (!VT.isVector())
- return SDValue();
- if (SVT != MVT::i64 && SVT != MVT::i32 && SVT != MVT::i16 && SVT != MVT::i8)
- return SDValue();
- if (InSVT != MVT::i1 || N0.getOpcode() != ISD::BITCAST)
- return SDValue();
-
- SDValue N00 = N0.getOperand(0);
- EVT SclVT = N0.getOperand(0).getValueType();
- if (!SclVT.isScalarInteger())
- return SDValue();
-
- SDLoc DL(N);
- SDValue Vec;
- SmallVector<int, 32> ShuffleMask;
- unsigned NumElts = VT.getVectorNumElements();
- assert(NumElts == SclVT.getSizeInBits() && "Unexpected bool vector size");
-
- // Broadcast the scalar integer to the vector elements.
- if (NumElts > EltSizeInBits) {
- // If the scalar integer is greater than the vector element size, then we
- // must split it down into sub-sections for broadcasting. For example:
- // i16 -> v16i8 (i16 -> v8i16 -> v16i8) with 2 sub-sections.
- // i32 -> v32i8 (i32 -> v8i32 -> v32i8) with 4 sub-sections.
- assert((NumElts % EltSizeInBits) == 0 && "Unexpected integer scale");
- unsigned Scale = NumElts / EltSizeInBits;
- EVT BroadcastVT =
- EVT::getVectorVT(*DAG.getContext(), SclVT, EltSizeInBits);
- Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
- Vec = DAG.getBitcast(VT, Vec);
-
- for (unsigned i = 0; i != Scale; ++i)
- ShuffleMask.append(EltSizeInBits, i);
- Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
- } else if (Subtarget.hasAVX2() && NumElts < EltSizeInBits &&
- (SclVT == MVT::i8 || SclVT == MVT::i16 || SclVT == MVT::i32)) {
- // If we have register broadcast instructions, use the scalar size as the
- // element type for the shuffle. Then cast to the wider element type. The
- // widened bits won't be used, and this might allow the use of a broadcast
- // load.
- assert((EltSizeInBits % NumElts) == 0 && "Unexpected integer scale");
- unsigned Scale = EltSizeInBits / NumElts;
- EVT BroadcastVT =
- EVT::getVectorVT(*DAG.getContext(), SclVT, NumElts * Scale);
- Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, BroadcastVT, N00);
- ShuffleMask.append(NumElts * Scale, 0);
- Vec = DAG.getVectorShuffle(BroadcastVT, DL, Vec, Vec, ShuffleMask);
- Vec = DAG.getBitcast(VT, Vec);
- } else {
- // For smaller scalar integers, we can simply any-extend it to the vector
- // element size (we don't care about the upper bits) and broadcast it to all
- // elements.
- SDValue Scl = DAG.getAnyExtOrTrunc(N00, DL, SVT);
- Vec = DAG.getNode(ISD::SCALAR_TO_VECTOR, DL, VT, Scl);
- ShuffleMask.append(NumElts, 0);
- Vec = DAG.getVectorShuffle(VT, DL, Vec, Vec, ShuffleMask);
- }
-
- // Now, mask the relevant bit in each element.
- SmallVector<SDValue, 32> Bits;
- for (unsigned i = 0; i != NumElts; ++i) {
- int BitIdx = (i % EltSizeInBits);
- APInt Bit = APInt::getBitsSet(EltSizeInBits, BitIdx, BitIdx + 1);
- Bits.push_back(DAG.getConstant(Bit, DL, SVT));
- }
- SDValue BitMask = DAG.getBuildVector(VT, DL, Bits);
- Vec = DAG.getNode(ISD::AND, DL, VT, Vec, BitMask);
-
- // Compare against the bitmask and extend the result.
- EVT CCVT = VT.changeVectorElementType(MVT::i1);
- Vec = DAG.getSetCC(DL, CCVT, Vec, BitMask, ISD::SETEQ);
- Vec = DAG.getSExtOrTrunc(Vec, DL, VT);
-
- // For SEXT, this is now done, otherwise shift the result down for
- // zero-extension.
- if (Opcode == ISD::SIGN_EXTEND)
- return Vec;
- return DAG.getNode(ISD::SRL, DL, VT, Vec,
- DAG.getConstant(EltSizeInBits - 1, DL, VT));
-}
-
// Attempt to combine a (sext/zext (setcc)) to a setcc with a xmm/ymm/zmm
// result type.
static SDValue combineExtSetcc(SDNode *N, SelectionDAG &DAG,
@@ -50603,7 +50760,8 @@ static SDValue combineSext(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
- if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
+ if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), DL, VT, N0,
+ DAG, DCI, Subtarget))
return V;
if (VT.isVector()) {
@@ -50757,7 +50915,8 @@ static SDValue combineZext(SDNode *N, SelectionDAG &DAG,
if (SDValue V = combineExtSetcc(N, DAG, Subtarget))
return V;
- if (SDValue V = combineToExtendBoolVectorInReg(N, DAG, DCI, Subtarget))
+ if (SDValue V = combineToExtendBoolVectorInReg(N->getOpcode(), dl, VT, N0,
+ DAG, DCI, Subtarget))
return V;
if (VT.isVector())
diff --git a/llvm/lib/Target/X86/X86ISelLowering.h b/llvm/lib/Target/X86/X86ISelLowering.h
index 50c7e2c319f6..99299dc884c9 100644
--- a/llvm/lib/Target/X86/X86ISelLowering.h
+++ b/llvm/lib/Target/X86/X86ISelLowering.h
@@ -249,9 +249,6 @@ namespace llvm {
SCALEFS,
SCALEFS_RND,
- // Unsigned Integer average.
- AVG,
-
/// Integer horizontal add/sub.
HADD,
HSUB,
diff --git a/llvm/lib/Target/X86/X86InstrAVX512.td b/llvm/lib/Target/X86/X86InstrAVX512.td
index bc67d1f89d7f..918d11008d20 100644
--- a/llvm/lib/Target/X86/X86InstrAVX512.td
+++ b/llvm/lib/Target/X86/X86InstrAVX512.td
@@ -5039,7 +5039,7 @@ defm VPMULHUW : avx512_binop_rm_vl_w<0xE4, "vpmulhuw", mulhu, SchedWriteVecIMul,
HasBWI, 1>;
defm VPMULHRSW : avx512_binop_rm_vl_w<0x0B, "vpmulhrsw", X86mulhrs,
SchedWriteVecIMul, HasBWI, 1>, T8PD;
-defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", X86avg,
+defm VPAVG : avx512_binop_rm_vl_bw<0xE0, 0xE3, "vpavg", avgceilu,
SchedWriteVecALU, HasBWI, 1>;
defm VPMULDQ : avx512_binop_rm_vl_q<0x28, "vpmuldq", X86pmuldq,
SchedWriteVecIMul, HasAVX512, 1>, T8PD;
diff --git a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
index 166f1f8c3251..57ba4683c6a4 100644
--- a/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
+++ b/llvm/lib/Target/X86/X86InstrFragmentsSIMD.td
@@ -287,7 +287,6 @@ def SDTX86CmpPTest : SDTypeProfile<1, 2, [SDTCisVT<0, i32>,
SDTCisSameAs<2, 1>]>;
def X86mulhrs : SDNode<"X86ISD::MULHRS", SDTIntBinOp, [SDNPCommutative]>;
-def X86avg : SDNode<"X86ISD::AVG" , SDTIntBinOp, [SDNPCommutative]>;
def X86ptest : SDNode<"X86ISD::PTEST", SDTX86CmpPTest>;
def X86testp : SDNode<"X86ISD::TESTP", SDTX86CmpPTest>;
def X86kortest : SDNode<"X86ISD::KORTEST", SDTX86CmpPTest>;
diff --git a/llvm/lib/Target/X86/X86InstrSSE.td b/llvm/lib/Target/X86/X86InstrSSE.td
index 035f139e6f33..5f15a69990af 100644
--- a/llvm/lib/Target/X86/X86InstrSSE.td
+++ b/llvm/lib/Target/X86/X86InstrSSE.td
@@ -3471,9 +3471,9 @@ defm PMAXUB : PDI_binop_all<0xDE, "pmaxub", umax, v16i8, v32i8,
SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
defm PMAXSW : PDI_binop_all<0xEE, "pmaxsw", smax, v8i16, v16i16,
SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
-defm PAVGB : PDI_binop_all<0xE0, "pavgb", X86avg, v16i8, v32i8,
+defm PAVGB : PDI_binop_all<0xE0, "pavgb", avgceilu, v16i8, v32i8,
SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
-defm PAVGW : PDI_binop_all<0xE3, "pavgw", X86avg, v8i16, v16i16,
+defm PAVGW : PDI_binop_all<0xE3, "pavgw", avgceilu, v8i16, v16i16,
SchedWriteVecALU, 1, NoVLX_Or_NoBWI>;
defm PMULUDQ : PDI_binop_all<0xF4, "pmuludq", X86pmuludq, v2i64, v4i64,
SchedWriteVecIMul, 1, NoVLX>;
diff --git a/llvm/lib/Target/X86/X86IntrinsicsInfo.h b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
index 1edec96bbec3..3c8be95b43e3 100644
--- a/llvm/lib/Target/X86/X86IntrinsicsInfo.h
+++ b/llvm/lib/Target/X86/X86IntrinsicsInfo.h
@@ -371,8 +371,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx2_packsswb, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx2_packusdw, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(avx2_packuswb, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
- X86_INTRINSIC_DATA(avx2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(avx2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx2_pavg_b, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
+ X86_INTRINSIC_DATA(avx2_pavg_w, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
X86_INTRINSIC_DATA(avx2_pblendvb, BLENDV, X86ISD::BLENDV, 0),
X86_INTRINSIC_DATA(avx2_permd, VPERM_2OP, X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx2_permps, VPERM_2OP, X86ISD::VPERMV, 0),
@@ -818,8 +818,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(avx512_packsswb_512, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(avx512_packusdw_512, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
X86_INTRINSIC_DATA(avx512_packuswb_512, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
- X86_INTRINSIC_DATA(avx512_pavg_b_512, INTR_TYPE_2OP, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(avx512_pavg_w_512, INTR_TYPE_2OP, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(avx512_pavg_b_512, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
+ X86_INTRINSIC_DATA(avx512_pavg_w_512, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
X86_INTRINSIC_DATA(avx512_permvar_df_256, VPERM_2OP, X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx512_permvar_df_512, VPERM_2OP, X86ISD::VPERMV, 0),
X86_INTRINSIC_DATA(avx512_permvar_di_256, VPERM_2OP, X86ISD::VPERMV, 0),
@@ -1281,8 +1281,8 @@ static const IntrinsicData IntrinsicsWithoutChain[] = {
X86_INTRINSIC_DATA(sse2_packssdw_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packsswb_128, INTR_TYPE_2OP, X86ISD::PACKSS, 0),
X86_INTRINSIC_DATA(sse2_packuswb_128, INTR_TYPE_2OP, X86ISD::PACKUS, 0),
- X86_INTRINSIC_DATA(sse2_pavg_b, INTR_TYPE_2OP, X86ISD::AVG, 0),
- X86_INTRINSIC_DATA(sse2_pavg_w, INTR_TYPE_2OP, X86ISD::AVG, 0),
+ X86_INTRINSIC_DATA(sse2_pavg_b, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
+ X86_INTRINSIC_DATA(sse2_pavg_w, INTR_TYPE_2OP, ISD::AVGCEILU, 0),
X86_INTRINSIC_DATA(sse2_pmadd_wd, INTR_TYPE_2OP, X86ISD::VPMADDWD, 0),
X86_INTRINSIC_DATA(sse2_pmovmskb_128, INTR_TYPE_1OP, X86ISD::MOVMSK, 0),
X86_INTRINSIC_DATA(sse2_pmulh_w, INTR_TYPE_2OP, ISD::MULHS, 0),
diff --git a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
index 9c16d3750998..1fb48419df1c 100644
--- a/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
+++ b/llvm/lib/Transforms/Coroutines/CoroFrame.cpp
@@ -27,6 +27,7 @@
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
+#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
@@ -1571,7 +1572,7 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
// Create a store instruction storing the value into the
// coroutine frame.
Instruction *InsertPt = nullptr;
- bool NeedToCopyArgPtrValue = false;
+ Type *ByValTy = nullptr;
if (auto *Arg = dyn_cast<Argument>(Def)) {
// For arguments, we will place the store instruction right after
// the coroutine frame pointer instruction, i.e. bitcast of
@@ -1583,8 +1584,7 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
Arg->getParent()->removeParamAttr(Arg->getArgNo(), Attribute::NoCapture);
if (Arg->hasByValAttr())
- NeedToCopyArgPtrValue = true;
-
+ ByValTy = Arg->getParamByValType();
} else if (auto *CSI = dyn_cast<AnyCoroSuspendInst>(Def)) {
// Don't spill immediately after a suspend; splitting assumes
// that the suspend will be followed by a branch.
@@ -1619,11 +1619,10 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
Builder.SetInsertPoint(InsertPt);
auto *G = Builder.CreateConstInBoundsGEP2_32(
FrameTy, FramePtr, 0, Index, Def->getName() + Twine(".spill.addr"));
- if (NeedToCopyArgPtrValue) {
+ if (ByValTy) {
// For byval arguments, we need to store the pointed value in the frame,
// instead of the pointer itself.
- auto *Value =
- Builder.CreateLoad(Def->getType()->getPointerElementType(), Def);
+ auto *Value = Builder.CreateLoad(ByValTy, Def);
Builder.CreateAlignedStore(Value, G, SpillAlignment);
} else {
Builder.CreateAlignedStore(Def, G, SpillAlignment);
@@ -1641,7 +1640,7 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
auto *GEP = GetFramePointer(E.first);
GEP->setName(E.first->getName() + Twine(".reload.addr"));
- if (NeedToCopyArgPtrValue)
+ if (ByValTy)
CurrentReload = GEP;
else
CurrentReload = Builder.CreateAlignedLoad(
@@ -1664,6 +1663,12 @@ static Instruction *insertSpills(const FrameDataInfo &FrameData,
}
}
+  // Salvage debug info on any dbg.addr that we see, but do not insert them
+  // into each block where there is a use.
+ if (auto *DI = dyn_cast<DbgAddrIntrinsic>(U)) {
+ coro::salvageDebugInfo(DbgPtrAllocaCache, DI, Shape.OptimizeFrame);
+ }
+
// If we have a single edge PHINode, remove it and replace it with a
// reload from the coroutine frame. (We already took care of multi edge
// PHINodes by rewriting them in the rewritePHIs function).
@@ -2581,8 +2586,10 @@ void coro::salvageDebugInfo(
DVI->replaceVariableLocationOp(OriginalStorage, Storage);
DVI->setExpression(Expr);
- /// It makes no sense to move the dbg.value intrinsic.
- if (!isa<DbgValueInst>(DVI)) {
+  // We only hoist dbg.declare today, since it doesn't make sense to hoist
+  // dbg.value or dbg.addr: they do not have the same function-wide
+  // guarantees that dbg.declare does.
+ if (!isa<DbgValueInst>(DVI) && !isa<DbgAddrIntrinsic>(DVI)) {
if (auto *II = dyn_cast<InvokeInst>(Storage))
DVI->moveBefore(II->getNormalDest()->getFirstNonPHI());
else if (auto *CBI = dyn_cast<CallBrInst>(Storage))
diff --git a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
index a6d9ce1033f3..e6bccd97e0e7 100644
--- a/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
+++ b/llvm/lib/Transforms/IPO/AlwaysInliner.cpp
@@ -1,4 +1,4 @@
-//===- InlineAlways.cpp - Code to inline always_inline functions ----------===//
+//===- AlwaysInliner.cpp - Code to inline always_inline functions ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
@@ -59,9 +59,16 @@ PreservedAnalyses AlwaysInlinerPass::run(Module &M,
for (User *U : F.users())
if (auto *CB = dyn_cast<CallBase>(U))
- if (CB->getCalledFunction() == &F &&
- CB->hasFnAttr(Attribute::AlwaysInline))
- Calls.insert(CB);
+ if (CB->getCalledFunction() == &F) {
+ if (F.hasFnAttribute(Attribute::AlwaysInline)) {
+        // Avoid inlining if the call site carries a noinline attribute.
+ if (!CB->isNoInline())
+ Calls.insert(CB);
+ } else if (CB->hasFnAttr(Attribute::AlwaysInline)) {
+        // OK: the call site itself carries the alwaysinline attribute.
+ Calls.insert(CB);
+ }
+ }
for (CallBase *CB : Calls) {
Function *Caller = CB->getCaller();
@@ -210,6 +217,9 @@ InlineCost AlwaysInlinerLegacyPass::getInlineCost(CallBase &CB) {
if (!CB.hasFnAttr(Attribute::AlwaysInline))
return InlineCost::getNever("no alwaysinline attribute");
+ if (Callee->hasFnAttribute(Attribute::AlwaysInline) && CB.isNoInline())
+ return InlineCost::getNever("noinline call site attribute");
+
auto IsViable = isInlineViable(*Callee);
if (!IsViable.isSuccess())
return InlineCost::getNever(IsViable.getFailureReason());
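Both paths above now agree that a noinline attribute on the call site overrides an alwaysinline attribute on the callee, while a call-site alwaysinline still forces inlining on its own. The resulting decision table, modelled as a standalone C++ sketch (shouldAlwaysInline is a hypothetical helper, not the pass's actual API):

#include <cassert>

enum class Decision { Inline, Skip };

// Per-call-site decision as the new-PM pass now makes it.
static Decision shouldAlwaysInline(bool CalleeAlwaysInline,
                                   bool CallSiteAlwaysInline,
                                   bool CallSiteNoInline) {
  if (CalleeAlwaysInline)
    return CallSiteNoInline ? Decision::Skip : Decision::Inline;
  // No callee attribute: only an explicit call-site alwaysinline forces it.
  return CallSiteAlwaysInline ? Decision::Inline : Decision::Skip;
}

int main() {
  assert(shouldAlwaysInline(true, false, false) == Decision::Inline);
  assert(shouldAlwaysInline(true, false, true) == Decision::Skip);  // new case
  assert(shouldAlwaysInline(false, true, false) == Decision::Inline);
  assert(shouldAlwaysInline(false, false, false) == Decision::Skip);
}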
diff --git a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
index 1cfbae983e4e..f577a6b0f174 100644
--- a/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
+++ b/llvm/lib/Transforms/IPO/OpenMPOpt.cpp
@@ -1458,7 +1458,6 @@ private:
case Intrinsic::nvvm_barrier0_and:
case Intrinsic::nvvm_barrier0_or:
case Intrinsic::nvvm_barrier0_popc:
- case Intrinsic::amdgcn_s_barrier:
return true;
default:
break;
diff --git a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
index 769bfa9cd937..34b0576982f5 100644
--- a/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/HWAddressSanitizer.cpp
@@ -247,13 +247,6 @@ bool shouldDetectUseAfterScope(const Triple &TargetTriple) {
/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer {
-private:
- struct AllocaInfo {
- AllocaInst *AI;
- SmallVector<IntrinsicInst *, 2> LifetimeStart;
- SmallVector<IntrinsicInst *, 2> LifetimeEnd;
- };
-
public:
HWAddressSanitizer(Module &M, bool CompileKernel, bool Recover,
const StackSafetyGlobalInfo *SSI)
@@ -269,7 +262,7 @@ public:
void setSSI(const StackSafetyGlobalInfo *S) { SSI = S; }
DenseMap<AllocaInst *, AllocaInst *> padInterestingAllocas(
- const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument);
+ const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument);
bool sanitizeFunction(Function &F,
llvm::function_ref<const DominatorTree &()> GetDT,
llvm::function_ref<const PostDominatorTree &()> GetPDT);
@@ -304,14 +297,10 @@ public:
void tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag, size_t Size);
Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
- bool instrumentStack(
- bool ShouldDetectUseAfterScope,
- MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
- SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
- DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
- SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
- llvm::function_ref<const DominatorTree &()> GetDT,
- llvm::function_ref<const PostDominatorTree &()> GetPDT);
+ bool instrumentStack(bool ShouldDetectUseAfterScope, memtag::StackInfo &Info,
+ Value *StackTag,
+ llvm::function_ref<const DominatorTree &()> GetDT,
+ llvm::function_ref<const PostDominatorTree &()> GetPDT);
Value *readRegister(IRBuilder<> &IRB, StringRef Name);
bool instrumentLandingPads(SmallVectorImpl<Instruction *> &RetVec);
Value *getNextTagWithCall(IRBuilder<> &IRB);
@@ -1325,11 +1314,7 @@ bool HWAddressSanitizer::instrumentLandingPads(
}
bool HWAddressSanitizer::instrumentStack(
- bool ShouldDetectUseAfterScope,
- MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument,
- SmallVector<Instruction *, 4> &UnrecognizedLifetimes,
- DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> &AllocaDbgMap,
- SmallVectorImpl<Instruction *> &RetVec, Value *StackTag,
+ bool ShouldDetectUseAfterScope, memtag::StackInfo &SInfo, Value *StackTag,
llvm::function_ref<const DominatorTree &()> GetDT,
llvm::function_ref<const PostDominatorTree &()> GetPDT) {
// Ideally, we want to calculate tagged stack base pointer, and rewrite all
@@ -1339,10 +1324,10 @@ bool HWAddressSanitizer::instrumentStack(
// This generates one extra instruction per alloca use.
unsigned int I = 0;
- for (auto &KV : AllocasToInstrument) {
+ for (auto &KV : SInfo.AllocasToInstrument) {
auto N = I++;
auto *AI = KV.first;
- AllocaInfo &Info = KV.second;
+ memtag::AllocaInfo &Info = KV.second;
IRBuilder<> IRB(AI->getNextNode());
// Replace uses of the alloca with tagged address.
@@ -1356,7 +1341,7 @@ bool HWAddressSanitizer::instrumentStack(
AI->replaceUsesWithIf(Replacement,
[AILong](Use &U) { return U.getUser() != AILong; });
- for (auto *DDI : AllocaDbgMap.lookup(AI)) {
+ for (auto *DDI : Info.DbgVariableIntrinsics) {
// Prepend "tag_offset, N" to the dwarf expression.
// Tag offset logically applies to the alloca pointer, and it makes sense
// to put it at the beginning of the expression.
@@ -1376,21 +1361,22 @@ bool HWAddressSanitizer::instrumentStack(
tagAlloca(IRB, AI, UARTag, AlignedSize);
};
bool StandardLifetime =
- UnrecognizedLifetimes.empty() &&
- isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd, &GetDT(),
- ClMaxLifetimes);
+ SInfo.UnrecognizedLifetimes.empty() &&
+ memtag::isStandardLifetime(Info.LifetimeStart, Info.LifetimeEnd,
+ &GetDT(), ClMaxLifetimes);
if (ShouldDetectUseAfterScope && StandardLifetime) {
IntrinsicInst *Start = Info.LifetimeStart[0];
IRB.SetInsertPoint(Start->getNextNode());
tagAlloca(IRB, AI, Tag, Size);
- if (!forAllReachableExits(GetDT(), GetPDT(), Start, Info.LifetimeEnd,
- RetVec, TagEnd)) {
+ if (!memtag::forAllReachableExits(GetDT(), GetPDT(), Start,
+ Info.LifetimeEnd, SInfo.RetVec,
+ TagEnd)) {
for (auto *End : Info.LifetimeEnd)
End->eraseFromParent();
}
} else {
tagAlloca(IRB, AI, Tag, Size);
- for (auto *RI : RetVec)
+ for (auto *RI : SInfo.RetVec)
TagEnd(RI);
if (!StandardLifetime) {
for (auto &II : Info.LifetimeStart)
@@ -1400,7 +1386,7 @@ bool HWAddressSanitizer::instrumentStack(
}
}
}
- for (auto &I : UnrecognizedLifetimes)
+ for (auto &I : SInfo.UnrecognizedLifetimes)
I->eraseFromParent();
return true;
}
@@ -1424,7 +1410,7 @@ bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
}
DenseMap<AllocaInst *, AllocaInst *> HWAddressSanitizer::padInterestingAllocas(
- const MapVector<AllocaInst *, AllocaInfo> &AllocasToInstrument) {
+ const MapVector<AllocaInst *, memtag::AllocaInfo> &AllocasToInstrument) {
DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap;
for (auto &KV : AllocasToInstrument) {
AllocaInst *AI = KV.first;
@@ -1469,52 +1455,13 @@ bool HWAddressSanitizer::sanitizeFunction(
SmallVector<InterestingMemoryOperand, 16> OperandsToInstrument;
SmallVector<MemIntrinsic *, 16> IntrinToInstrument;
- MapVector<AllocaInst *, AllocaInfo> AllocasToInstrument;
- SmallVector<Instruction *, 8> RetVec;
SmallVector<Instruction *, 8> LandingPadVec;
- SmallVector<Instruction *, 4> UnrecognizedLifetimes;
- DenseMap<AllocaInst *, std::vector<DbgVariableIntrinsic *>> AllocaDbgMap;
- bool CallsReturnTwice = false;
+
+ memtag::StackInfoBuilder SIB(
+ [this](const AllocaInst &AI) { return isInterestingAlloca(AI); });
for (auto &Inst : instructions(F)) {
- if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
- if (CI->canReturnTwice()) {
- CallsReturnTwice = true;
- }
- }
if (InstrumentStack) {
- if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
- if (isInterestingAlloca(*AI))
- AllocasToInstrument.insert({AI, {}});
- continue;
- }
- auto *II = dyn_cast<IntrinsicInst>(&Inst);
- if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
- II->getIntrinsicID() == Intrinsic::lifetime_end)) {
- AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
- if (!AI) {
- UnrecognizedLifetimes.push_back(&Inst);
- continue;
- }
- if (!isInterestingAlloca(*AI))
- continue;
- if (II->getIntrinsicID() == Intrinsic::lifetime_start)
- AllocasToInstrument[AI].LifetimeStart.push_back(II);
- else
- AllocasToInstrument[AI].LifetimeEnd.push_back(II);
- continue;
- }
- }
-
- Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
- if (ExitUntag)
- RetVec.push_back(ExitUntag);
-
- if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
- for (Value *V : DVI->location_ops()) {
- if (auto *Alloca = dyn_cast_or_null<AllocaInst>(V))
- if (!AllocaDbgMap.count(Alloca) || AllocaDbgMap[Alloca].back() != DVI)
- AllocaDbgMap[Alloca].push_back(DVI);
- }
+ SIB.visit(Inst);
}
if (InstrumentLandingPads && isa<LandingPadInst>(Inst))
@@ -1527,6 +1474,8 @@ bool HWAddressSanitizer::sanitizeFunction(
IntrinToInstrument.push_back(MI);
}
+ memtag::StackInfo &SInfo = SIB.get();
+
initializeCallbacks(*F.getParent());
bool Changed = false;
@@ -1534,7 +1483,7 @@ bool HWAddressSanitizer::sanitizeFunction(
if (!LandingPadVec.empty())
Changed |= instrumentLandingPads(LandingPadVec);
- if (AllocasToInstrument.empty() && F.hasPersonalityFn() &&
+ if (SInfo.AllocasToInstrument.empty() && F.hasPersonalityFn() &&
F.getPersonalityFn()->getName() == kHwasanPersonalityThunkName) {
// __hwasan_personality_thunk is a no-op for functions without an
// instrumented stack, so we can drop it.
@@ -1542,7 +1491,7 @@ bool HWAddressSanitizer::sanitizeFunction(
Changed = true;
}
- if (AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
+ if (SInfo.AllocasToInstrument.empty() && OperandsToInstrument.empty() &&
IntrinToInstrument.empty())
return Changed;
@@ -1552,24 +1501,24 @@ bool HWAddressSanitizer::sanitizeFunction(
IRBuilder<> EntryIRB(InsertPt);
emitPrologue(EntryIRB,
/*WithFrameRecord*/ ClRecordStackHistory &&
- Mapping.WithFrameRecord && !AllocasToInstrument.empty());
+ Mapping.WithFrameRecord &&
+ !SInfo.AllocasToInstrument.empty());
- if (!AllocasToInstrument.empty()) {
+ if (!SInfo.AllocasToInstrument.empty()) {
Value *StackTag =
ClGenerateTagsWithCalls ? nullptr : getStackBaseTag(EntryIRB);
// Calls to functions that may return twice (e.g. setjmp) confuse the
// postdominator analysis, and will leave us to keep memory tagged after
// function return. Work around this by always untagging at every return
// statement if return_twice functions are called.
- instrumentStack(DetectUseAfterScope && !CallsReturnTwice,
- AllocasToInstrument, UnrecognizedLifetimes, AllocaDbgMap,
- RetVec, StackTag, GetDT, GetPDT);
+ instrumentStack(DetectUseAfterScope && !SInfo.CallsReturnTwice, SIB.get(),
+ StackTag, GetDT, GetPDT);
}
// Pad and align each of the allocas that we instrumented to stop small
// uninteresting allocas from hiding in instrumented alloca's padding and so
// that we have enough space to store real tags for short granules.
DenseMap<AllocaInst *, AllocaInst *> AllocaToPaddedAllocaMap =
- padInterestingAllocas(AllocasToInstrument);
+ padInterestingAllocas(SInfo.AllocasToInstrument);
if (!AllocaToPaddedAllocaMap.empty()) {
for (auto &Inst : instructions(F)) {
diff --git a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
index c51acdf52f14..05960db899bc 100644
--- a/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
+++ b/llvm/lib/Transforms/Instrumentation/MemorySanitizer.cpp
@@ -4083,8 +4083,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// Nothing to do here.
}
- void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
- const DataLayout &DL, bool isOutput) {
+ void instrumentAsmArgument(Value *Operand, Type *ElemTy, Instruction &I,
+ IRBuilder<> &IRB, const DataLayout &DL,
+ bool isOutput) {
// For each assembly argument, we check its value for being initialized.
// If the argument is a pointer, we assume it points to a single element
// of the corresponding type (or to a 8-byte word, if the type is unsized).
@@ -4096,10 +4097,9 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
assert(!isOutput);
return;
}
- Type *ElType = OpType->getPointerElementType();
- if (!ElType->isSized())
+ if (!ElemTy->isSized())
return;
- int Size = DL.getTypeStoreSize(ElType);
+ int Size = DL.getTypeStoreSize(ElemTy);
Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
@@ -4159,14 +4159,16 @@ struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
// that we won't overwrite uninit values before checking them.
for (int i = OutputArgs; i < NumOperands; i++) {
Value *Operand = CB->getOperand(i);
- instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
+ instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
+ /*isOutput*/ false);
}
// Unpoison output arguments. This must happen before the actual InlineAsm
// call, so that the shadow for memory published in the asm() statement
// remains valid.
for (int i = 0; i < OutputArgs; i++) {
Value *Operand = CB->getOperand(i);
- instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
+ instrumentAsmArgument(Operand, CB->getParamElementType(i), I, IRB, DL,
+ /*isOutput*/ true);
}
setShadow(&I, getCleanShadow(&I));
diff --git a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
index 97cafbce66f9..6ba38ca1703e 100644
--- a/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
+++ b/llvm/lib/Transforms/Scalar/ConstraintElimination.cpp
@@ -229,6 +229,12 @@ decompose(Value *V, SmallVector<PreconditionTy, 4> &Preconditions,
ConstantInt *CI;
if (match(V, m_NUWAdd(m_Value(Op0), m_ConstantInt(CI))))
return {{CI->getSExtValue(), nullptr}, {1, Op0}};
+ if (match(V, m_Add(m_Value(Op0), m_ConstantInt(CI))) && CI->isNegative()) {
+ Preconditions.emplace_back(
+ CmpInst::ICMP_UGE, Op0,
+ ConstantInt::get(Op0->getType(), CI->getSExtValue() * -1));
+ return {{CI->getSExtValue(), nullptr}, {1, Op0}};
+ }
if (match(V, m_NUWAdd(m_Value(Op0), m_Value(Op1))))
return {{0, nullptr}, {1, Op0}, {1, Op1}};
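The new case above decomposes add %x, C with a negative constant C into the linear term x + C, but only under the recorded precondition x uge -C; with that precondition the unsigned addition cannot wrap, so facts derived from the decomposition stay valid. A small standalone C++ check of that reasoning (illustrative only):

#include <cassert>
#include <cstdint>

int main() {
  const int64_t C = -5;                       // the negative constant
  for (uint32_t X = 0; X < 100; ++X) {
    uint32_t IRAdd = X + (uint32_t)C;         // what the i32 add computes
    int64_t Math = (int64_t)X + C;            // the mathematical value x + C
    if (X >= (uint32_t)-C)                    // the precondition: x uge -C
      assert((int64_t)IRAdd == Math);         // no wrap: both agree
    else
      assert((int64_t)IRAdd != Math);         // below -C the add wraps
  }
}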
diff --git a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
index ddc747a2ca29..bc01f8b5d114 100644
--- a/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
+++ b/llvm/lib/Transforms/Scalar/InferAddressSpaces.cpp
@@ -1252,12 +1252,18 @@ bool InferAddressSpacesImpl::rewriteWithNewAddressSpaces(
}
// Otherwise, replaces the use with flat(NewV).
- if (Instruction *Inst = dyn_cast<Instruction>(V)) {
+ if (Instruction *VInst = dyn_cast<Instruction>(V)) {
// Don't create a copy of the original addrspacecast.
if (U == V && isa<AddrSpaceCastInst>(V))
continue;
- BasicBlock::iterator InsertPos = std::next(Inst->getIterator());
+ // Insert the addrspacecast after NewV.
+ BasicBlock::iterator InsertPos;
+ if (Instruction *NewVInst = dyn_cast<Instruction>(NewV))
+ InsertPos = std::next(NewVInst->getIterator());
+ else
+ InsertPos = std::next(VInst->getIterator());
+
while (isa<PHINode>(InsertPos))
++InsertPos;
U.set(new AddrSpaceCastInst(NewV, V->getType(), "", &*InsertPos));
diff --git a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
index c734611836eb..24972db404be 100644
--- a/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
+++ b/llvm/lib/Transforms/Utils/AMDGPUEmitPrintf.cpp
@@ -50,9 +50,6 @@ static Value *callPrintfBegin(IRBuilder<> &Builder, Value *Version) {
auto Int64Ty = Builder.getInt64Ty();
auto M = Builder.GetInsertBlock()->getModule();
auto Fn = M->getOrInsertFunction("__ockl_printf_begin", Int64Ty, Int64Ty);
- if (!M->getModuleFlag("amdgpu_hostcall")) {
- M->addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
- }
return Builder.CreateCall(Fn, Version);
}
diff --git a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
index 3ffd5e1dea56..132f44f88534 100644
--- a/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
+++ b/llvm/lib/Transforms/Utils/MemoryTaggingSupport.cpp
@@ -12,7 +12,10 @@
#include "llvm/Transforms/Utils/MemoryTaggingSupport.h"
+#include "llvm/Analysis/ValueTracking.h"
+
namespace llvm {
+namespace memtag {
namespace {
bool maybeReachableFromEachOther(const SmallVectorImpl<IntrinsicInst *> &Insts,
const DominatorTree *DT, size_t MaxLifetimes) {
@@ -54,4 +57,52 @@ Instruction *getUntagLocationIfFunctionExit(Instruction &Inst) {
}
return nullptr;
}
+
+void StackInfoBuilder::visit(Instruction &Inst) {
+ if (CallInst *CI = dyn_cast<CallInst>(&Inst)) {
+ if (CI->canReturnTwice()) {
+ Info.CallsReturnTwice = true;
+ }
+ }
+ if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
+ if (IsInterestingAlloca(*AI)) {
+ Info.AllocasToInstrument[AI].AI = AI;
+ Info.AllocasToInstrument[AI].OldAI = AI;
+ }
+ return;
+ }
+ auto *II = dyn_cast<IntrinsicInst>(&Inst);
+ if (II && (II->getIntrinsicID() == Intrinsic::lifetime_start ||
+ II->getIntrinsicID() == Intrinsic::lifetime_end)) {
+ AllocaInst *AI = findAllocaForValue(II->getArgOperand(1));
+ if (!AI) {
+ Info.UnrecognizedLifetimes.push_back(&Inst);
+ return;
+ }
+ if (!IsInterestingAlloca(*AI))
+ return;
+ if (II->getIntrinsicID() == Intrinsic::lifetime_start)
+ Info.AllocasToInstrument[AI].LifetimeStart.push_back(II);
+ else
+ Info.AllocasToInstrument[AI].LifetimeEnd.push_back(II);
+ return;
+ }
+ if (auto *DVI = dyn_cast<DbgVariableIntrinsic>(&Inst)) {
+ for (Value *V : DVI->location_ops()) {
+ if (auto *AI = dyn_cast_or_null<AllocaInst>(V)) {
+ if (!IsInterestingAlloca(*AI))
+ continue;
+ AllocaInfo &AInfo = Info.AllocasToInstrument[AI];
+ auto &DVIVec = AInfo.DbgVariableIntrinsics;
+ if (DVIVec.empty() || DVIVec.back() != DVI)
+ DVIVec.push_back(DVI);
+ }
+ }
+ }
+ Instruction *ExitUntag = getUntagLocationIfFunctionExit(Inst);
+ if (ExitUntag)
+ Info.RetVec.push_back(ExitUntag);
+}
+
+} // namespace memtag
} // namespace llvm
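For context on how the newly exposed memtag::StackInfoBuilder::visit is meant to be driven, a rough usage sketch; the constructor and the "interesting alloca" predicate shown here are assumptions for illustration only and are not taken from this patch:

    #include "llvm/IR/Function.h"
    #include "llvm/IR/Instructions.h"
    #include "llvm/Transforms/Utils/MemoryTaggingSupport.h"

    using namespace llvm;

    // Hypothetical driver: walk every instruction once and let visit()
    // bucket allocas, lifetime markers, dbg.value uses and exit points.
    void collectStackInfo(Function &F) {
      // Assumed constructor taking the predicate; not shown in this hunk.
      memtag::StackInfoBuilder SIB(
          [](const AllocaInst &AI) { return AI.isStaticAlloca(); });
      for (BasicBlock &BB : F)
        for (Instruction &I : BB)
          SIB.visit(I);
    }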
diff --git a/llvm/test/Analysis/ScalarEvolution/logical-operations.ll b/llvm/test/Analysis/ScalarEvolution/logical-operations.ll
index e1e874737693..682d99c2e349 100644
--- a/llvm/test/Analysis/ScalarEvolution/logical-operations.ll
+++ b/llvm/test/Analysis/ScalarEvolution/logical-operations.ll
@@ -463,7 +463,7 @@ define i32 @umin_seq_x_y(i32 %x, i32 %y) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %y, i32 %x)
; CHECK-NEXT: --> (%x umin %y) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> (%x umin_seq %y) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y
;
%umin = call i32 @llvm.umin(i32 %y, i32 %x)
@@ -472,13 +472,103 @@ define i32 @umin_seq_x_y(i32 %x, i32 %y) {
ret i32 %r
}
+define i32 @umin_seq_x_y_tautological(i32 %x, i32 %y) {
+; CHECK-LABEL: 'umin_seq_x_y_tautological'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_tautological
+; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %y, i32 %x)
+; CHECK-NEXT: --> (%x umin %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %umin.is.zero, i32 0, i32 %umin
+; CHECK-NEXT: --> (%x umin %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_tautological
+;
+ %umin = call i32 @llvm.umin(i32 %y, i32 %x)
+ %umin.is.zero = icmp eq i32 %umin, 0
+ %r = select i1 %umin.is.zero, i32 0, i32 %umin
+ ret i32 %r
+}
+define i32 @umin_seq_x_y_tautological_wrongtype(i32 %x, i32 %y) {
+; CHECK-LABEL: 'umin_seq_x_y_tautological_wrongtype'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_tautological_wrongtype
+; CHECK-NEXT: %umax = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+; CHECK-NEXT: --> (%x umax %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %umax.is.zero, i32 0, i32 %umax
+; CHECK-NEXT: --> (%x umax %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_tautological_wrongtype
+;
+ %umax = call i32 @llvm.umax(i32 %y, i32 %x)
+ %umax.is.zero = icmp eq i32 %umax, 0
+ %r = select i1 %umax.is.zero, i32 0, i32 %umax
+ ret i32 %r
+}
+
+define i32 @umin_seq_x_y_wrongtype0(i32 %x, i32 %y) {
+; CHECK-LABEL: 'umin_seq_x_y_wrongtype0'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_wrongtype0
+; CHECK-NEXT: %umax = call i32 @llvm.umax.i32(i32 %y, i32 %x)
+; CHECK-NEXT: --> (%x umax %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %umax
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_wrongtype0
+;
+ %umax = call i32 @llvm.umax(i32 %y, i32 %x)
+ %x.is.zero = icmp eq i32 %x, 0
+ %r = select i1 %x.is.zero, i32 0, i32 %umax
+ ret i32 %r
+}
+define i32 @umin_seq_x_y_wrongtype1(i32 %x, i32 %y) {
+; CHECK-LABEL: 'umin_seq_x_y_wrongtype1'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_wrongtype1
+; CHECK-NEXT: %smax = call i32 @llvm.smax.i32(i32 %y, i32 %x)
+; CHECK-NEXT: --> (%x smax %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %smax
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_wrongtype1
+;
+ %smax = call i32 @llvm.smax(i32 %y, i32 %x)
+ %x.is.zero = icmp eq i32 %x, 0
+ %r = select i1 %x.is.zero, i32 0, i32 %smax
+ ret i32 %r
+}
+define i32 @umin_seq_x_y_wrongtype2(i32 %x, i32 %y) {
+; CHECK-LABEL: 'umin_seq_x_y_wrongtype2'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_wrongtype2
+; CHECK-NEXT: %smin = call i32 @llvm.smin.i32(i32 %y, i32 %x)
+; CHECK-NEXT: --> (%x smin %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %smin
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_wrongtype2
+;
+ %smin = call i32 @llvm.smin(i32 %y, i32 %x)
+ %x.is.zero = icmp eq i32 %x, 0
+ %r = select i1 %x.is.zero, i32 0, i32 %smin
+ ret i32 %r
+}
+
+define i32 @umin_seq_x_y_wrongtype3(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: 'umin_seq_x_y_wrongtype3'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_wrongtype3
+; CHECK-NEXT: %umax = call i32 @llvm.umax.i32(i32 %x, i32 %z)
+; CHECK-NEXT: --> (%x umax %z) U: full-set S: full-set
+; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %umax, i32 %y)
+; CHECK-NEXT: --> ((%x umax %z) umin %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %umin
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_wrongtype3
+;
+ %umax = call i32 @llvm.umax(i32 %x, i32 %z)
+ %umin = call i32 @llvm.umin(i32 %umax, i32 %y)
+ %x.is.zero = icmp eq i32 %x, 0
+ %r = select i1 %x.is.zero, i32 0, i32 %umin
+ ret i32 %r
+}
+
define i32 @umin_seq_y_x(i32 %x, i32 %y) {
; CHECK-LABEL: 'umin_seq_y_x'
; CHECK-NEXT: Classifying expressions for: @umin_seq_y_x
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %x, i32 %y)
; CHECK-NEXT: --> (%x umin %y) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> (%y umin_seq %x) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @umin_seq_y_x
;
%umin = call i32 @llvm.umin(i32 %x, i32 %y)
@@ -487,6 +577,27 @@ define i32 @umin_seq_y_x(i32 %x, i32 %y) {
ret i32 %r
}
+define i32 @umin_seq_x_x_y_z(i32 %x, i32 %y, i32 %z) {
+; CHECK-LABEL: 'umin_seq_x_x_y_z'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_x_x_y_z
+; CHECK-NEXT: %umin0 = call i32 @llvm.umin.i32(i32 %z, i32 %x)
+; CHECK-NEXT: --> (%x umin %z) U: full-set S: full-set
+; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %umin0, i32 %y)
+; CHECK-NEXT: --> (%x umin %y umin %z) U: full-set S: full-set
+; CHECK-NEXT: %r0 = select i1 %x.is.zero, i32 0, i32 %umin
+; CHECK-NEXT: --> (%x umin_seq (%y umin %z)) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %r0
+; CHECK-NEXT: --> (%x umin_seq (%y umin %z)) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_x_y_z
+;
+ %umin0 = call i32 @llvm.umin(i32 %z, i32 %x)
+ %umin = call i32 @llvm.umin(i32 %umin0, i32 %y)
+ %x.is.zero = icmp eq i32 %x, 0
+ %r0 = select i1 %x.is.zero, i32 0, i32 %umin
+ %r = select i1 %x.is.zero, i32 0, i32 %r0
+ ret i32 %r
+}
+
define i32 @umin_seq_x_y_z(i32 %x, i32 %y, i32 %z) {
; CHECK-LABEL: 'umin_seq_x_y_z'
; CHECK-NEXT: Classifying expressions for: @umin_seq_x_y_z
@@ -495,9 +606,9 @@ define i32 @umin_seq_x_y_z(i32 %x, i32 %y, i32 %z) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %umin0, i32 %y)
; CHECK-NEXT: --> (%x umin %y umin %z) U: full-set S: full-set
; CHECK-NEXT: %r0 = select i1 %y.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r0 U: full-set S: full-set
+; CHECK-NEXT: --> (%y umin_seq (%x umin %z)) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %x.is.zero, i32 0, i32 %r0
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> (%x umin_seq %y umin_seq %z) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @umin_seq_x_y_z
;
%umin0 = call i32 @llvm.umin(i32 %z, i32 %x)
@@ -509,6 +620,32 @@ define i32 @umin_seq_x_y_z(i32 %x, i32 %y, i32 %z) {
ret i32 %r
}
+define i32 @umin_seq_a_b_c_d(i32 %a, i32 %b, i32 %c, i32 %d) {
+; CHECK-LABEL: 'umin_seq_a_b_c_d'
+; CHECK-NEXT: Classifying expressions for: @umin_seq_a_b_c_d
+; CHECK-NEXT: %umin1 = call i32 @llvm.umin.i32(i32 %c, i32 %d)
+; CHECK-NEXT: --> (%c umin %d) U: full-set S: full-set
+; CHECK-NEXT: %r1 = select i1 %c.is.zero, i32 0, i32 %umin1
+; CHECK-NEXT: --> (%c umin_seq %d) U: full-set S: full-set
+; CHECK-NEXT: %umin0 = call i32 @llvm.umin.i32(i32 %a, i32 %b)
+; CHECK-NEXT: --> (%a umin %b) U: full-set S: full-set
+; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %umin0, i32 %r1)
+; CHECK-NEXT: --> ((%c umin_seq %d) umin %a umin %b) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %d.is.zero, i32 0, i32 %umin
+; CHECK-NEXT: --> (%d umin_seq (%a umin %b umin %c)) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umin_seq_a_b_c_d
+;
+ %umin1 = call i32 @llvm.umin(i32 %c, i32 %d)
+ %c.is.zero = icmp eq i32 %c, 0
+ %r1 = select i1 %c.is.zero, i32 0, i32 %umin1
+
+ %umin0 = call i32 @llvm.umin(i32 %a, i32 %b)
+ %umin = call i32 @llvm.umin(i32 %umin0, i32 %r1)
+ %d.is.zero = icmp eq i32 %d, 0
+ %r = select i1 %d.is.zero, i32 0, i32 %umin
+ ret i32 %r
+}
+
define i32 @select_x_or_zero_expanded(i1 %c, i32 %x) {
; CHECK-LABEL: 'select_x_or_zero_expanded'
; CHECK-NEXT: Classifying expressions for: @select_x_or_zero_expanded
@@ -517,7 +654,7 @@ define i32 @select_x_or_zero_expanded(i1 %c, i32 %x) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %c.splat, i32 %x)
; CHECK-NEXT: --> ((sext i1 %c to i32) umin %x) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %v0.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> ((sext i1 %c to i32) umin_seq %x) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @select_x_or_zero_expanded
;
%c.splat = sext i1 %c to i32
@@ -537,7 +674,7 @@ define i32 @select_zero_or_x_expanded(i1 %c, i32 %y) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %c.splat.not, i32 %y)
; CHECK-NEXT: --> ((-1 + (-1 * (sext i1 %c to i32))<nsw>)<nsw> umin %y) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %v0.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 + (-1 * (sext i1 %c to i32))<nsw>)<nsw> umin_seq %y) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @select_zero_or_x_expanded
;
%c.splat = sext i1 %c to i32
@@ -557,7 +694,7 @@ define i32 @select_zero_or_x_expanded2(i1 %c, i32 %y) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %c.not.splat, i32 %y)
; CHECK-NEXT: --> ((sext i1 (true + %c) to i32) umin %y) U: full-set S: full-set
; CHECK-NEXT: %r = select i1 %v0.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: --> ((sext i1 (true + %c) to i32) umin_seq %y) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @select_zero_or_x_expanded2
;
%c.not = xor i1 %c, -1
@@ -578,9 +715,9 @@ define i32 @select_x_or_constant_expanded(i1 %c, i32 %x) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %c.splat, i32 %x.off)
; CHECK-NEXT: --> ((sext i1 %c to i32) umin (-42 + %x)) U: full-set S: full-set
; CHECK-NEXT: %r.off = select i1 %v0.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r.off U: full-set S: full-set
+; CHECK-NEXT: --> ((sext i1 %c to i32) umin_seq (-42 + %x)) U: full-set S: full-set
; CHECK-NEXT: %r = add i32 %r.off, 42
-; CHECK-NEXT: --> (42 + %r.off) U: full-set S: full-set
+; CHECK-NEXT: --> (42 + ((sext i1 %c to i32) umin_seq (-42 + %x))) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @select_x_or_constant_expanded
;
%c.splat = sext i1 %c to i32
@@ -604,9 +741,9 @@ define i32 @select_constant_or_y_expanded(i1 %c, i32 %y) {
; CHECK-NEXT: %umin = call i32 @llvm.umin.i32(i32 %c.splat.not, i32 %y.off)
; CHECK-NEXT: --> ((-42 + %y) umin (-1 + (-1 * (sext i1 %c to i32))<nsw>)<nsw>) U: full-set S: full-set
; CHECK-NEXT: %r.off = select i1 %v0.is.zero, i32 0, i32 %umin
-; CHECK-NEXT: --> %r.off U: full-set S: full-set
+; CHECK-NEXT: --> ((-1 + (-1 * (sext i1 %c to i32))<nsw>)<nsw> umin_seq (-42 + %y)) U: full-set S: full-set
; CHECK-NEXT: %r = add i32 %r.off, 42
-; CHECK-NEXT: --> (42 + %r.off) U: full-set S: full-set
+; CHECK-NEXT: --> (42 + ((-1 + (-1 * (sext i1 %c to i32))<nsw>)<nsw> umin_seq (-42 + %y))) U: full-set S: full-set
; CHECK-NEXT: Determining loop execution counts for: @select_constant_or_y_expanded
;
%c.splat = sext i1 %c to i32
@@ -620,3 +757,6 @@ define i32 @select_constant_or_y_expanded(i1 %c, i32 %y) {
}
declare i32 @llvm.umin(i32, i32)
+declare i32 @llvm.umax(i32, i32)
+declare i32 @llvm.smin(i32, i32)
+declare i32 @llvm.smax(i32, i32)
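The checks added above all hinge on SCEV's umin_seq expression. As a value-level analogy (not part of the test file): x umin_seq y evaluates to the same number as umin(x, y), but it is "sequential" in that y is never consulted once x is zero, so poison or undefined behaviour attached to y cannot leak into the result.

    #include <algorithm>
    #include <cstdint>
    #include <functional>

    // C++ analogy with a lazily evaluated second operand.
    uint32_t umin_seq(uint32_t x, const std::function<uint32_t()> &y) {
      return x == 0 ? 0u : std::min(x, y()); // y() only runs when x != 0
    }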
diff --git a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
index 8259a87c888d..208f369ac4a9 100644
--- a/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
+++ b/llvm/test/Analysis/ScalarEvolution/min-max-exprs.ll
@@ -1,3 +1,4 @@
+; NOTE: Assertions have been autogenerated by utils/update_analyze_test_checks.py
; RUN: opt "-passes=print<scalar-evolution>" -disable-output < %s 2>&1 | FileCheck %s
;
; This checks if the min and max expressions are properly recognized by
@@ -16,6 +17,41 @@
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
define void @f(i32* %A, i32 %N) {
+; CHECK-LABEL: 'f'
+; CHECK-NEXT: Classifying expressions for: @f
+; CHECK-NEXT: %i.0 = phi i32 [ 0, %bb ], [ %tmp23, %bb2 ]
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb1> U: [0,-2147483648) S: [0,-2147483648) Exits: (0 smax %N) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %i.0.1 = sext i32 %i.0 to i64
+; CHECK-NEXT: --> {0,+,1}<nuw><nsw><%bb1> U: [0,2147483648) S: [0,2147483648) Exits: (zext i32 (0 smax %N) to i64) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp3 = add nuw nsw i32 %i.0, 3
+; CHECK-NEXT: --> {3,+,1}<nuw><%bb1> U: [3,-2147483645) S: [3,-2147483645) Exits: (3 + (0 smax %N))<nuw> LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp5 = sext i32 %tmp3 to i64
+; CHECK-NEXT: --> (sext i32 {3,+,1}<nuw><%bb1> to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: (sext i32 (3 + (0 smax %N))<nuw> to i64) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp6 = sext i32 %N to i64
+; CHECK-NEXT: --> (sext i32 %N to i64) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: (sext i32 %N to i64) LoopDispositions: { %bb1: Invariant }
+; CHECK-NEXT: %tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
+; CHECK-NEXT: --> ((sext i32 {3,+,1}<nuw><%bb1> to i64) smin (sext i32 %N to i64)) U: [-2147483648,2147483648) S: [-2147483648,2147483648) Exits: ((sext i32 (3 + (0 smax %N))<nuw> to i64) smin (sext i32 %N to i64)) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
+; CHECK-NEXT: --> ((4 * ((sext i32 {3,+,1}<nuw><%bb1> to i64) smin (sext i32 %N to i64)))<nsw> + %A) U: full-set S: full-set Exits: ((4 * ((sext i32 (3 + (0 smax %N))<nuw> to i64) smin (sext i32 %N to i64)))<nsw> + %A) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp12 = load i32, i32* %tmp11, align 4
+; CHECK-NEXT: --> %tmp12 U: full-set S: full-set Exits: <<Unknown>> LoopDispositions: { %bb1: Variant }
+; CHECK-NEXT: %tmp13 = shl nsw i32 %tmp12, 1
+; CHECK-NEXT: --> (2 * %tmp12) U: [0,-1) S: [-2147483648,2147483647) Exits: <<Unknown>> LoopDispositions: { %bb1: Variant }
+; CHECK-NEXT: %tmp17 = add nsw i64 %i.0.1, -3
+; CHECK-NEXT: --> {-3,+,1}<nsw><%bb1> U: [-3,2147483645) S: [-3,2147483645) Exits: (-3 + (zext i32 (0 smax %N) to i64))<nsw> LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp19 = select i1 %tmp14, i64 0, i64 %tmp17
+; CHECK-NEXT: --> (-3 + (3 smax {0,+,1}<nuw><nsw><%bb1>))<nsw> U: [0,2147483645) S: [0,2147483645) Exits: (-3 + (3 smax (zext i32 (0 smax %N) to i64)))<nsw> LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
+; CHECK-NEXT: --> (-12 + (4 * (3 smax {0,+,1}<nuw><nsw><%bb1>))<nuw><nsw> + %A) U: full-set S: full-set Exits: (-12 + (4 * (3 smax (zext i32 (0 smax %N) to i64)))<nuw><nsw> + %A) LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: %tmp23 = add nuw nsw i32 %i.0, 1
+; CHECK-NEXT: --> {1,+,1}<nuw><%bb1> U: [1,-2147483647) S: [1,-2147483647) Exits: (1 + (0 smax %N))<nuw> LoopDispositions: { %bb1: Computable }
+; CHECK-NEXT: Determining loop execution counts for: @f
+; CHECK-NEXT: Loop %bb1: backedge-taken count is (0 smax %N)
+; CHECK-NEXT: Loop %bb1: max backedge-taken count is 2147483647
+; CHECK-NEXT: Loop %bb1: Predicated backedge-taken count is (0 smax %N)
+; CHECK-NEXT: Predicates:
+; CHECK: Loop %bb1: Trip multiple is 1
+;
bb:
br label %bb1
@@ -32,8 +68,6 @@ bb2: ; preds = %bb1
%tmp6 = sext i32 %N to i64
%tmp9 = select i1 %tmp4, i64 %tmp5, i64 %tmp6
; min(N, i+3)
-; CHECK: select i1 %tmp4, i64 %tmp5, i64 %tmp6
-; CHECK-NEXT: --> ((sext i32 {3,+,1}<nuw><%bb1> to i64) smin (sext i32 %N to i64))
%tmp11 = getelementptr inbounds i32, i32* %A, i64 %tmp9
%tmp12 = load i32, i32* %tmp11, align 4
%tmp13 = shl nsw i32 %tmp12, 1
@@ -41,8 +75,6 @@ bb2: ; preds = %bb1
%tmp17 = add nsw i64 %i.0.1, -3
%tmp19 = select i1 %tmp14, i64 0, i64 %tmp17
; max(0, i - 3)
-; CHECK: select i1 %tmp14, i64 0, i64 %tmp17
-; CHECK-NEXT: --> (-3 + (3 smax {0,+,1}<nuw><nsw><%bb1>))
%tmp21 = getelementptr inbounds i32, i32* %A, i64 %tmp19
store i32 %tmp13, i32* %tmp21, align 4
%tmp23 = add nuw nsw i32 %i.0, 1
@@ -51,3 +83,133 @@ bb2: ; preds = %bb1
bb24: ; preds = %bb1
ret void
}
+
+define i8 @umax_basic_eq_off1(i8 %x, i8 %y) {
+; CHECK-LABEL: 'umax_basic_eq_off1'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_eq_off1
+; CHECK-NEXT: %lhs = add i8 %y, 1
+; CHECK-NEXT: --> (1 + %y) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+; CHECK-NEXT: --> ((1 umax %x) + %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_eq_off1
+;
+ %x.is.zero = icmp eq i8 %x, 0
+ %lhs = add i8 %y, 1
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+ ret i8 %r
+}
+define i8 @umax_basic_ne_off1(i8 %x, i8 %y) {
+; CHECK-LABEL: 'umax_basic_ne_off1'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_ne_off1
+; CHECK-NEXT: %lhs = add i8 %y, 1
+; CHECK-NEXT: --> (1 + %y) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %rhs, i8 %lhs
+; CHECK-NEXT: --> ((1 umax %x) + %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_ne_off1
+;
+ %x.is.zero = icmp ne i8 %x, 0
+ %lhs = add i8 %y, 1
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %rhs, i8 %lhs
+ ret i8 %r
+}
+
+define i8 @umax_basic_eq_off0(i8 %x, i8 %y) {
+; CHECK-LABEL: 'umax_basic_eq_off0'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_eq_off0
+; CHECK-NEXT: %lhs = add i8 %y, 0
+; CHECK-NEXT: --> %y U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_eq_off0
+;
+ %x.is.zero = icmp eq i8 %x, 0
+ %lhs = add i8 %y, 0
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+ ret i8 %r
+}
+
+define i8 @umax_basic_eq_off2(i8 %x, i8 %y) {
+; CHECK-LABEL: 'umax_basic_eq_off2'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_eq_off2
+; CHECK-NEXT: %lhs = add i8 %y, 2
+; CHECK-NEXT: --> (2 + %y) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_eq_off2
+;
+ %x.is.zero = icmp eq i8 %x, 0
+ %lhs = add i8 %y, 2
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+ ret i8 %r
+}
+
+define i8 @umax_basic_eq_var_off(i8 %x, i8 %y, i8 %c) {
+; CHECK-LABEL: 'umax_basic_eq_var_off'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_eq_var_off
+; CHECK-NEXT: %lhs = add i8 %y, %c
+; CHECK-NEXT: --> (%y + %c) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> (%x + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+; CHECK-NEXT: --> %r U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_eq_var_off
+;
+ %x.is.zero = icmp eq i8 %x, 0
+ %lhs = add i8 %y, %c
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+ ret i8 %r
+}
+
+define i8 @umax_basic_eq_narrow(i4 %x.narrow, i8 %y) {
+; CHECK-LABEL: 'umax_basic_eq_narrow'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_eq_narrow
+; CHECK-NEXT: %x = zext i4 %x.narrow to i8
+; CHECK-NEXT: --> (zext i4 %x.narrow to i8) U: [0,16) S: [0,16)
+; CHECK-NEXT: %lhs = add i8 %y, 1
+; CHECK-NEXT: --> (1 + %y) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> ((zext i4 %x.narrow to i8) + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+; CHECK-NEXT: --> ((1 umax (zext i4 %x.narrow to i8)) + %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_eq_narrow
+;
+ %x = zext i4 %x.narrow to i8
+ %x.is.zero = icmp eq i4 %x.narrow, 0
+ %lhs = add i8 %y, 1
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %lhs, i8 %rhs
+ ret i8 %r
+}
+define i8 @umax_basic_ne_narrow(i4 %x.narrow, i8 %y) {
+; CHECK-LABEL: 'umax_basic_ne_narrow'
+; CHECK-NEXT: Classifying expressions for: @umax_basic_ne_narrow
+; CHECK-NEXT: %x = zext i4 %x.narrow to i8
+; CHECK-NEXT: --> (zext i4 %x.narrow to i8) U: [0,16) S: [0,16)
+; CHECK-NEXT: %lhs = add i8 %y, 1
+; CHECK-NEXT: --> (1 + %y) U: full-set S: full-set
+; CHECK-NEXT: %rhs = add i8 %x, %y
+; CHECK-NEXT: --> ((zext i4 %x.narrow to i8) + %y) U: full-set S: full-set
+; CHECK-NEXT: %r = select i1 %x.is.zero, i8 %rhs, i8 %lhs
+; CHECK-NEXT: --> ((1 umax (zext i4 %x.narrow to i8)) + %y) U: full-set S: full-set
+; CHECK-NEXT: Determining loop execution counts for: @umax_basic_ne_narrow
+;
+ %x = zext i4 %x.narrow to i8
+ %x.is.zero = icmp ne i4 %x.narrow, 0
+ %lhs = add i8 %y, 1
+ %rhs = add i8 %x, %y
+ %r = select i1 %x.is.zero, i8 %rhs, i8 %lhs
+ ret i8 %r
+}
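The new umax_basic_* checks rely on a small algebraic identity; a sketch of it with exhaustively checkable 8-bit helpers (names are illustrative): select (x == 0), y + 1, x + y equals umax(1, x) + y, since x == 0 gives umax(1, 0) + y = y + 1 and any non-zero unsigned x satisfies umax(1, x) = x. The off-by-two and variable-offset variants do not fit this pattern and remain unsimplified.

    #include <algorithm>
    #include <cstdint>

    uint8_t folded(uint8_t x, uint8_t y)   { return uint8_t(std::max<uint8_t>(1, x) + y); }
    uint8_t original(uint8_t x, uint8_t y) { return x == 0 ? uint8_t(y + 1) : uint8_t(x + y); }
    // folded(x, y) == original(x, y) for every 8-bit x and y, matching the
    // "(1 umax %x) + %y" result the eq_off1/ne_off1 checks expect.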
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
index 117766814b33..2127229d6a08 100644
--- a/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/legalizer-info-validation.mir
@@ -131,6 +131,10 @@
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
# DEBUG-NEXT: .. imm index coverage check SKIPPED: user-defined predicate detected
#
+# DEBUG-NEXT: G_INTRINSIC_FPTRUNC_ROUND (opcode {{[0-9]+}}): 2 type indices, 0 imm indices
+# DEBUG-NEXT: .. type index coverage check SKIPPED: no rules defined
+# DEBUG-NEXT: .. imm index coverage check SKIPPED: no rules defined
+#
# DEBUG-NEXT: G_INTRINSIC_TRUNC (opcode {{[0-9]+}}): 1 type index, 0 imm indices
# DEBUG-NEXT: .. opcode {{[0-9]+}} is aliased to {{[0-9]+}}
# DEBUG-NEXT: .. type index coverage check SKIPPED: user-defined predicate detected
diff --git a/llvm/test/CodeGen/AArch64/addsub.ll b/llvm/test/CodeGen/AArch64/addsub.ll
index b95c15ac6d07..6d150025b368 100644
--- a/llvm/test/CodeGen/AArch64/addsub.ll
+++ b/llvm/test/CodeGen/AArch64/addsub.ll
@@ -406,4 +406,216 @@ define i64 @addl_0x80000000(i64 %a) {
ret i64 %b
}
-; TODO: adds/subs
+; ADDS and SUBS Optimizations
+; Check all types first, then check that only EQ and NE are optimized
+define i1 @eq_i(i32 %0) {
+; CHECK-LABEL: eq_i:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmp w8, #273
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i32 %0, 1118481
+ ret i1 %2
+}
+
+define i1 @eq_l(i64 %0) {
+; CHECK-LABEL: eq_l:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub x8, x0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmp x8, #273
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i64 %0, 1118481
+ ret i1 %2
+}
+
+define i1 @ne_i(i32 %0) {
+; CHECK-LABEL: ne_i:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmp w8, #273
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
+ %2 = icmp ne i32 %0, 1118481
+ ret i1 %2
+}
+
+define i1 @ne_l(i64 %0) {
+; CHECK-LABEL: ne_l:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub x8, x0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmp x8, #273
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
+ %2 = icmp ne i64 %0, 1118481
+ ret i1 %2
+}
+
+define i1 @eq_in(i32 %0) {
+; CHECK-LABEL: eq_in:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add w8, w0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmn w8, #273
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i32 %0, -1118481
+ ret i1 %2
+}
+
+define i1 @eq_ln(i64 %0) {
+; CHECK-LABEL: eq_ln:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x8, x0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmn x8, #273
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i64 %0, -1118481
+ ret i1 %2
+}
+
+define i1 @ne_in(i32 %0) {
+; CHECK-LABEL: ne_in:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add w8, w0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmn w8, #273
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
+ %2 = icmp ne i32 %0, -1118481
+ ret i1 %2
+}
+
+define i1 @ne_ln(i64 %0) {
+; CHECK-LABEL: ne_ln:
+; CHECK: // %bb.0:
+; CHECK-NEXT: add x8, x0, #273, lsl #12 // =1118208
+; CHECK-NEXT: cmn x8, #273
+; CHECK-NEXT: cset w0, ne
+; CHECK-NEXT: ret
+ %2 = icmp ne i64 %0, -1118481
+ ret i1 %2
+}
+
+define i1 @reject_eq(i32 %0) {
+; CHECK-LABEL: reject_eq:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #51712
+; CHECK-NEXT: movk w8, #15258, lsl #16
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: cset w0, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i32 %0, 1000000000
+ ret i1 %2
+}
+
+define i1 @reject_non_eqne_csinc(i32 %0) {
+; CHECK-LABEL: reject_non_eqne_csinc:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #4369
+; CHECK-NEXT: movk w8, #17, lsl #16
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: cset w0, lo
+; CHECK-NEXT: ret
+ %2 = icmp ult i32 %0, 1118481
+ ret i1 %2
+}
+
+define i32 @accept_csel(i32 %0) {
+; CHECK-LABEL: accept_csel:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w9, w0, #273, lsl #12 // =1118208
+; CHECK-NEXT: mov w8, #17
+; CHECK-NEXT: cmp w9, #273
+; CHECK-NEXT: mov w9, #11
+; CHECK-NEXT: csel w0, w9, w8, eq
+; CHECK-NEXT: ret
+ %2 = icmp eq i32 %0, 1118481
+ %3 = select i1 %2, i32 11, i32 17
+ ret i32 %3
+}
+
+define i32 @reject_non_eqne_csel(i32 %0) {
+; CHECK-LABEL: reject_non_eqne_csel:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #4369
+; CHECK-NEXT: mov w9, #11
+; CHECK-NEXT: movk w8, #17, lsl #16
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: mov w8, #17
+; CHECK-NEXT: csel w0, w9, w8, lo
+; CHECK-NEXT: ret
+ %2 = icmp ult i32 %0, 1118481
+ %3 = select i1 %2, i32 11, i32 17
+ ret i32 %3
+}
+
+declare void @fooy()
+
+define void @accept_branch(i32 %0) {
+; CHECK-LABEL: accept_branch:
+; CHECK: // %bb.0:
+; CHECK-NEXT: sub w8, w0, #291, lsl #12 // =1191936
+; CHECK-NEXT: cmp w8, #1110
+; CHECK-NEXT: b.eq .LBB32_2
+; CHECK-NEXT: // %bb.1:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB32_2:
+; CHECK-NEXT: b fooy
+ %2 = icmp ne i32 %0, 1193046
+ br i1 %2, label %4, label %3
+3: ; preds = %1
+ tail call void @fooy()
+ br label %4
+4: ; preds = %3, %1
+ ret void
+}
+
+define void @reject_non_eqne_branch(i32 %0) {
+; CHECK-LABEL: reject_non_eqne_branch:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #13398
+; CHECK-NEXT: movk w8, #18, lsl #16
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: b.le .LBB33_2
+; CHECK-NEXT: // %bb.1:
+; CHECK-NEXT: ret
+; CHECK-NEXT: .LBB33_2:
+; CHECK-NEXT: b fooy
+ %2 = icmp sgt i32 %0, 1193046
+ br i1 %2, label %4, label %3
+3: ; preds = %1
+ tail call void @fooy()
+ br label %4
+4: ; preds = %3, %1
+ ret void
+}
+
+define i32 @reject_multiple_usages(i32 %0) {
+; CHECK-LABEL: reject_multiple_usages:
+; CHECK: // %bb.0:
+; CHECK-NEXT: mov w8, #4369
+; CHECK-NEXT: mov w9, #3
+; CHECK-NEXT: movk w8, #17, lsl #16
+; CHECK-NEXT: mov w10, #17
+; CHECK-NEXT: cmp w0, w8
+; CHECK-NEXT: mov w8, #9
+; CHECK-NEXT: mov w11, #12
+; CHECK-NEXT: csel w8, w8, w9, eq
+; CHECK-NEXT: csel w9, w11, w10, hi
+; CHECK-NEXT: add w8, w8, w9
+; CHECK-NEXT: mov w9, #53312
+; CHECK-NEXT: movk w9, #2, lsl #16
+; CHECK-NEXT: cmp w0, w9
+; CHECK-NEXT: mov w9, #26304
+; CHECK-NEXT: movk w9, #1433, lsl #16
+; CHECK-NEXT: csel w0, w8, w9, hi
+; CHECK-NEXT: ret
+ %2 = icmp eq i32 %0, 1118481
+ %3 = icmp ugt i32 %0, 1118481
+ %4 = select i1 %2, i32 9, i32 3
+ %5 = select i1 %3, i32 12, i32 17
+ %6 = add i32 %4, %5
+ %7 = icmp ugt i32 %0, 184384
+ %8 = select i1 %7, i32 %6, i32 93939392
+ ret i32 %8
+}
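The new ADDS/SUBS tests encode a simple piece of immediate arithmetic: an AArch64 ADD/SUB immediate is 12 bits, optionally shifted left by 12, so a constant such as 1118481 (0x111111) splits into 273 << 12 plus 273, and x == 1118481 can be checked as (x - (273 << 12)) == 273. A minimal sketch of that rewrite (illustrative only):

    #include <cstdint>

    // Same truth value as (x == 1118481); the intermediate subtraction may
    // wrap, which is why the reject_* functions above show that only EQ and
    // NE comparisons are rewritten this way.
    bool eq_split(uint32_t x) { return (x - (273u << 12)) == 273u; }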
diff --git a/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll b/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
index f5a4a45adbc0..0c2cf1778722 100644
--- a/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
+++ b/llvm/test/CodeGen/AArch64/arm64-instruction-mix-remarks.ll
@@ -12,8 +12,8 @@
; YAML: - INST_add: '2'
; YAML: - INST_b.: '1'
; YAML: - INST_ldr: '1'
-; YAML: - INST_movk: '1'
-; YAML: - INST_movz: '1'
+; YAML: - INST_orr: '1'
+; YAML: - INST_sub: '1'
; YAML: - INST_subs: '1'
; YAML: Name: InstructionMix
@@ -27,13 +27,12 @@
define i32 @foo(i32* %ptr, i32 %x, i64 %y) !dbg !3 {
; CHECK-LABEL: foo:
; CHECK: ; %bb.0: ; %entry
-; CHECK-NEXT: ldr w10, [x0]
+; CHECK-NEXT: ldr w9, [x0]
; CHECK-NEXT: mov x8, x0
-; CHECK-NEXT: mov w9, #16959
-; CHECK-NEXT: movk w9, #15, lsl #16
-; CHECK-NEXT: add w0, w10, w1
-; CHECK-NEXT: add x10, x0, x2
-; CHECK-NEXT: cmp x10, x9
+; CHECK-NEXT: add w0, w9, w1
+; CHECK-NEXT: add x9, x0, x2
+; CHECK-NEXT: sub x9, x9, #244, lsl #12 ; =999424
+; CHECK-NEXT: cmp x9, #575
; CHECK-NEXT: b.eq LBB0_2
; CHECK-NEXT: ; %bb.1: ; %else
; CHECK-NEXT: mul w9, w0, w1
diff --git a/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll b/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
deleted file mode 100644
index b439fdbeb0da..000000000000
--- a/llvm/test/CodeGen/AArch64/tailcall-ssp-split-debug.ll
+++ /dev/null
@@ -1,40 +0,0 @@
-; RUN: llc -mtriple=arm64-apple-ios %s -o - | FileCheck %s
-
-define swifttailcc void @foo(i8* %call) ssp {
-; CHECK-LABEL: foo:
- %var = alloca [28 x i8], align 16
- br i1 undef, label %if.then, label %if.end
-
-if.then:
- ret void
-
-if.end:
- ; CHECK: mov x[[NULL:[0-9]+]], xzr
- ; CHECK: ldr [[FPTR:x[0-9]+]], [x[[NULL]]]
- ; CHECK: br [[FPTR]]
- call void @llvm.dbg.value(metadata i8* %call, metadata !19, metadata !DIExpression()), !dbg !21
- %fptr = load void (i8*)*, void (i8*)** null, align 8
- musttail call swifttailcc void %fptr(i8* null)
- ret void
-}
-
-declare i8* @pthread_getspecific()
-
-; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
-declare void @llvm.dbg.value(metadata, metadata, metadata) #1
-
-!llvm.module.flags = !{!2}
-!llvm.dbg.cu = !{!11}
-
-!2 = !{i32 2, !"Debug Info Version", i32 3}
-!11 = distinct !DICompileUnit(language: DW_LANG_C99, file: !12, producer: "Apple clang version 13.1.6 (clang-1316.0.17.4)", isOptimized: true, runtimeVersion: 0, emissionKind: FullDebug, enums: !13, splitDebugInlining: false, nameTableKind: None, sysroot: "/Library/Developer/CommandLineTools/SDKs/MacOSX12.3.sdk", sdk: "MacOSX12.3.sdk")
-!12 = !DIFile(filename: "tmp.c", directory: "/Users/tim/llvm-internal/llvm-project/build")
-!13 = !{}
-!14 = !{!"Apple clang version 13.1.6 (clang-1316.0.17.4)"}
-!15 = distinct !DISubprogram(name: "bar", scope: !12, file: !12, line: 3, type: !16, scopeLine: 3, flags: DIFlagAllCallsDescribed, spFlags: DISPFlagDefinition | DISPFlagOptimized, unit: !11, retainedNodes: !18)
-!16 = !DISubroutineType(types: !17)
-!17 = !{null}
-!18 = !{!19}
-!19 = !DILocalVariable(name: "var", scope: !15, file: !12, line: 4, type: !20)
-!20 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
-!21 = !DILocation(line: 0, scope: !15)
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
index 8e0f3fc47989..7521a3c2c613 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/call-outgoing-stack-args.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -global-isel -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope -check-prefix=MUBUF %s
-; RUN: llc -global-isel -amdgpu-enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope -check-prefix=FLATSCR %s
+; RUN: llc -global-isel -mattr=+enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -o - %s | FileCheck -enable-var-scope -check-prefix=FLATSCR %s
; Test end-to-end codegen for outgoing arguments passed on the
; stack. This test is likely redundant when all DAG and GlobalISel
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.gfx.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.gfx.ll
index 7c9103226b7e..8bc4111abe1a 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.gfx.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch-init.gfx.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -global-isel -amdgpu-enable-flat-scratch -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=MESA %s
-; RUN: llc -global-isel -amdgpu-enable-flat-scratch -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=PAL %s
+; RUN: llc -global-isel -mattr=+enable-flat-scratch -mtriple=amdgcn-mesa-mesa3d -mcpu=gfx900 < %s | FileCheck -check-prefix=MESA %s
+; RUN: llc -global-isel -mattr=+enable-flat-scratch -mtriple=amdgcn-amd-amdpal -mcpu=gfx900 < %s | FileCheck -check-prefix=PAL %s
; Test that the initialization for flat_scratch doesn't crash. PAL
; doesn't add a user SGPR for initializing flat_scratch, mesa does
diff --git a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
index 04cc0c56292e..a38017a70954 100644
--- a/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/GlobalISel/flat-scratch.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx1030 -global-isel -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -global-isel -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -global-isel -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GFX10 %s
define amdgpu_kernel void @store_load_sindex_kernel(i32 %idx) {
; GFX9-LABEL: store_load_sindex_kernel:
diff --git a/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir b/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
index be034eb96799..033050698f4c 100644
--- a/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
+++ b/llvm/test/CodeGen/AMDGPU/accvgpr-copy.mir
@@ -408,8 +408,8 @@ body: |
; GFX908-LABEL: name: s_to_a
; GFX908: liveins: $sgpr0
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr0, implicit $exec
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr0, implicit $exec
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0
; GFX90A-LABEL: name: s_to_a
; GFX90A: liveins: $sgpr0
@@ -430,10 +430,10 @@ body: |
; GFX908-LABEL: name: s2_to_a2
; GFX908: liveins: $sgpr0_sgpr1
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit killed $sgpr0_sgpr1
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr1, implicit $exec, implicit killed $sgpr0_sgpr1
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1
; GFX90A-LABEL: name: s2_to_a2
; GFX90A: liveins: $sgpr0_sgpr1
@@ -456,12 +456,12 @@ body: |
; GFX908-LABEL: name: s3_to_a3
; GFX908: liveins: $sgpr0_sgpr1_sgpr2
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr2, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2
; GFX90A-LABEL: name: s3_to_a3
; GFX90A: liveins: $sgpr0_sgpr1_sgpr2
@@ -486,14 +486,14 @@ body: |
; GFX908-LABEL: name: s4_to_a4
; GFX908: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr3, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2_agpr3
; GFX90A-LABEL: name: s4_to_a4
; GFX90A: liveins: $sgpr0_sgpr1_sgpr2_sgpr3
@@ -520,18 +520,18 @@ body: |
; GFX908-LABEL: name: s6_to_a6
; GFX908: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr5, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5
; GFX90A-LABEL: name: s6_to_a6
; GFX90A: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5
@@ -562,22 +562,22 @@ body: |
; GFX908-LABEL: name: s8_to_a8
; GFX908: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr5, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr6, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr7, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr5, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr6, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr7, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
; GFX90A-LABEL: name: s8_to_a8
; GFX90A: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7
@@ -612,38 +612,38 @@ body: |
; GFX908-LABEL: name: s16_to_a16
; GFX908: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr5, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr6, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr7, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr10 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr11, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr11 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr12, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr12 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr13, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr13 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr14, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr14 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr15, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
- ; GFX908-NEXT: $agpr15 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr4, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr5, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr6, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr7, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr8, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr9, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr10, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr10 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr11, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr11 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr12, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr12 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr13, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr13 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr14, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr14 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 killed $sgpr15, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
+ ; GFX908-NEXT: $agpr15 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
; GFX90A-LABEL: name: s16_to_a16
; GFX90A: liveins: $sgpr0_sgpr1_sgpr2_sgpr3_sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10_sgpr11_sgpr12_sgpr13_sgpr14_sgpr15
@@ -692,8 +692,8 @@ body: |
bb.0:
; GFX908-LABEL: name: a_to_a
; GFX908: $agpr1 = IMPLICIT_DEF
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 killed $agpr1, implicit $exec
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 killed $agpr1, implicit $exec
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0
; GFX90A-LABEL: name: a_to_a
; GFX90A: $agpr1 = IMPLICIT_DEF
@@ -713,11 +713,11 @@ body: |
; GFX908-LABEL: name: a2_to_a2_kill
; GFX908: liveins: $agpr0_agpr1
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit-def $agpr1_agpr2
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec, implicit-def $agpr1_agpr2
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr1, implicit $agpr2, implicit $agpr3
; GFX90A-LABEL: name: a2_to_a2_kill
; GFX90A: liveins: $agpr0_agpr1
@@ -740,12 +740,12 @@ body: |
; GFX908-LABEL: name: a3_to_a3_nonoverlap_kill
; GFX908: liveins: $agpr4_agpr5_agpr6
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr4_agpr5_agpr6
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr4_agpr5_agpr6
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 killed $agpr6, implicit $exec, implicit killed $agpr4_agpr5_agpr6
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr4_agpr5_agpr6
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr0_agpr1_agpr2
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr4_agpr5_agpr6
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 killed $agpr6, implicit $exec, implicit killed $agpr4_agpr5_agpr6
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2
; GFX90A-LABEL: name: a3_to_a3_nonoverlap_kill
; GFX90A: liveins: $agpr4_agpr5_agpr6
@@ -767,11 +767,11 @@ body: |
; GFX908-LABEL: name: a3_to_a3_overlap_kill
; GFX908: liveins: $agpr1_agpr2_agpr3
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 $vgpr32, implicit $exec, implicit-def $agpr0_agpr1_agpr2
- ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 $vgpr32, implicit $exec, implicit $agpr1_agpr2_agpr3
- ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr2, implicit $exec
+ ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr0 = V_ACCVGPR_WRITE_B32_e64 $vgpr2, implicit $exec, implicit-def $agpr0_agpr1_agpr2
+ ; GFX908-NEXT: $agpr1 = V_ACCVGPR_WRITE_B32_e64 $vgpr2, implicit $exec, implicit $agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0_agpr1_agpr2, implicit $vgpr1
; GFX90A-LABEL: name: a3_to_a3_overlap_kill
@@ -794,13 +794,13 @@ body: |
bb.0:
; GFX908-LABEL: name: a4_to_a4
; GFX908: $agpr0_agpr1_agpr2_agpr3 = IMPLICIT_DEF
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit-def $agpr2_agpr3_agpr4_agpr5
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr2, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec, implicit-def $agpr2_agpr3_agpr4_agpr5
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr2, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr2_agpr3_agpr4_agpr5
; GFX90A-LABEL: name: a4_to_a4
; GFX90A: $agpr0_agpr1_agpr2_agpr3 = IMPLICIT_DEF
@@ -823,13 +823,13 @@ body: |
; GFX908-LABEL: name: a4_to_a4_overlap
; GFX908: liveins: $agpr0_agpr1_agpr2_agpr3
; GFX908-NEXT: {{ $}}
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit-def $agpr2_agpr3_agpr4_agpr5
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 $vgpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr2, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec, implicit-def $agpr2_agpr3_agpr4_agpr5
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 $vgpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $vgpr2 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr3 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr2, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr2 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr0, implicit $agpr1, implicit $agpr2, implicit $agpr3, implicit $agpr4, implicit $agpr5
; GFX90A-LABEL: name: a4_to_a4_overlap
; GFX90A: liveins: $agpr0_agpr1_agpr2_agpr3
@@ -850,22 +850,22 @@ body: |
bb.0:
; GFX908-LABEL: name: a8_to_a8
; GFX908: $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 = IMPLICIT_DEF
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr15 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr14 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr13 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr12 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr11 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr10 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr15 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec, implicit-def $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr14 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr13 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr12 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr11 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr10 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr9 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $agpr8 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
; GFX90A-LABEL: name: a8_to_a8
; GFX90A: $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7 = IMPLICIT_DEF
@@ -891,38 +891,38 @@ body: |
; GFX908-LABEL: name: a16_to_a16
; GFX908: $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = IMPLICIT_DEF
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr15, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr31 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr14, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr30 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr13, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr29 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr12, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr28 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr11, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr27 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr10, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr26 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr9, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr25 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr8, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr24 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr23 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr22 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr21 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr20 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr19 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr18 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr17 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
- ; GFX908-NEXT: $agpr16 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr15, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr31 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr14, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr30 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr13, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr29 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr12, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr28 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr11, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr27 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr10, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr26 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr9, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr25 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr8, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr24 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr7, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr23 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr6, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr22 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr5, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr21 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr4, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr20 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr19 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr18 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr17 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15
+ ; GFX908-NEXT: $agpr16 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr16_agpr17_agpr18_agpr19_agpr20_agpr21_agpr22_agpr23_agpr24_agpr25_agpr26_agpr27_agpr28_agpr29_agpr30_agpr31
; GFX90A-LABEL: name: a16_to_a16
; GFX90A: $agpr0_agpr1_agpr2_agpr3_agpr4_agpr5_agpr6_agpr7_agpr8_agpr9_agpr10_agpr11_agpr12_agpr13_agpr14_agpr15 = IMPLICIT_DEF
@@ -986,14 +986,14 @@ body: |
; GFX908: liveins: $agpr0, $sgpr2_sgpr3
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: S_NOP 0, implicit-def dead $sgpr0_sgpr1
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr0, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr4_agpr5_agpr6_agpr7, implicit $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-LABEL: name: copy_sgpr_to_agpr_tuple
; GFX90A: liveins: $agpr0, $sgpr2_sgpr3
@@ -1023,14 +1023,14 @@ body: |
; GFX908: liveins: $agpr0, $sgpr2_sgpr3
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: S_NOP 0, implicit-def dead $sgpr0_sgpr1
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 $sgpr3, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $vgpr1 = V_MOV_B32_e32 $sgpr2, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_MOV_B32_e32 $sgpr1, implicit $exec, implicit $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_MOV_B32_e32 killed $sgpr0, implicit $exec, implicit killed $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr4_agpr5_agpr6_agpr7
; GFX90A-LABEL: name: copy_sgpr_to_agpr_tuple_kill
; GFX90A: liveins: $agpr0, $sgpr2_sgpr3
@@ -1061,14 +1061,14 @@ body: |
; GFX908: liveins: $agpr0, $agpr2_agpr3
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: S_NOP 0, implicit-def dead $agpr0_agpr1
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr0, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr4_agpr5_agpr6_agpr7, implicit $agpr0_agpr1_agpr2_agpr3
; GFX90A-LABEL: name: copy_agpr_to_agpr_tuple
; GFX90A: liveins: $agpr0, $agpr2_agpr3
@@ -1095,14 +1095,14 @@ body: |
; GFX908: liveins: $agpr0, $agpr2_agpr3
; GFX908-NEXT: {{ $}}
; GFX908-NEXT: S_NOP 0, implicit-def dead $agpr0_agpr1
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
- ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec
- ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
- ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3
- ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 $agpr3, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr7 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit-def $agpr4_agpr5_agpr6_agpr7
+ ; GFX908-NEXT: $vgpr1 = V_ACCVGPR_READ_B32_e64 $agpr2, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr6 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr1, implicit $exec
+ ; GFX908-NEXT: $vgpr0 = V_ACCVGPR_READ_B32_e64 $agpr1, implicit $exec, implicit $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr5 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr0, implicit $exec
+ ; GFX908-NEXT: $vgpr32 = V_ACCVGPR_READ_B32_e64 killed $agpr0, implicit $exec, implicit killed $agpr0_agpr1_agpr2_agpr3
+ ; GFX908-NEXT: $agpr4 = V_ACCVGPR_WRITE_B32_e64 killed $vgpr32, implicit $exec, implicit $exec
; GFX908-NEXT: S_ENDPGM 0, implicit $agpr4_agpr5_agpr6_agpr7
; GFX90A-LABEL: name: copy_agpr_to_agpr_tuple_kill
; GFX90A: liveins: $agpr0, $agpr2_agpr3
diff --git a/llvm/test/CodeGen/AMDGPU/accvgpr-spill-scc-clobber.mir b/llvm/test/CodeGen/AMDGPU/accvgpr-spill-scc-clobber.mir
index 724b09025b1a..54fa3bbe3a82 100644
--- a/llvm/test/CodeGen/AMDGPU/accvgpr-spill-scc-clobber.mir
+++ b/llvm/test/CodeGen/AMDGPU/accvgpr-spill-scc-clobber.mir
@@ -1,8 +1,8 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX908 %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX90A %s
-# RUN: llc -amdgpu-enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX908-FLATSCR %s
-# RUN: llc -amdgpu-enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX90A-FLATSCR %s
+# RUN: llc -mattr=+enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX908-FLATSCR %s
+# RUN: llc -mattr=+enable-flat-scratch -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX90A-FLATSCR %s
---
name: agpr32_restore_clobber_scc
diff --git a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
index 668b2f9ed506..0850b8c77443 100644
--- a/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
+++ b/llvm/test/CodeGen/AMDGPU/addrspacecast-constantexpr.ll
@@ -230,6 +230,6 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { argmemonly nofree nounwind willreturn }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
index 21408e293958..c32713e9baf1 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa-call.ll
@@ -837,7 +837,7 @@ define float @func_other_intrinsic_call(float %arg) #3 {
ret float %fadd
}
-; Implicit arguments need to be enabled for sanitizers
+; Hostcall needs to be enabled for sanitizers
define amdgpu_kernel void @kern_sanitize_address() #4 {
; AKF_HSA-LABEL: define {{[^@]+}}@kern_sanitize_address
; AKF_HSA-SAME: () #[[ATTR5:[0-9]+]] {
@@ -853,7 +853,7 @@ define amdgpu_kernel void @kern_sanitize_address() #4 {
ret void
}
-; Implicit arguments need to be enabled for sanitizers
+; Hostcall needs to be enabled for sanitizers
define void @func_sanitize_address() #4 {
; AKF_HSA-LABEL: define {{[^@]+}}@func_sanitize_address
; AKF_HSA-SAME: () #[[ATTR5]] {
@@ -869,7 +869,7 @@ define void @func_sanitize_address() #4 {
ret void
}
-; Implicit arguments need to be enabled for sanitizers
+; Hostcall needs to be enabled for sanitizers
define void @func_indirect_sanitize_address() #3 {
; AKF_HSA-LABEL: define {{[^@]+}}@func_indirect_sanitize_address
; AKF_HSA-SAME: () #[[ATTR3]] {
@@ -885,7 +885,7 @@ define void @func_indirect_sanitize_address() #3 {
ret void
}
-; Implicit arguments need to be enabled for sanitizers
+; Hostcall needs to be enabled for sanitizers
define amdgpu_kernel void @kern_indirect_sanitize_address() #3 {
; AKF_HSA-LABEL: define {{[^@]+}}@kern_indirect_sanitize_address
; AKF_HSA-SAME: () #[[ATTR4]] {
@@ -937,22 +937,22 @@ attributes #5 = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" }
; AKF_HSA: attributes #[[ATTR6:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR12]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR13]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="gfx900" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR14]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "target-cpu"="fiji" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR15]] = { nounwind "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR16]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR17]] = { nounwind sanitize_address "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR18]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_HSA: attributes #[[ATTR19:[0-9]+]] = { nounwind sanitize_address "amdgpu-no-implicitarg-ptr" "uniform-work-group-size"="false" }
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
index 6b0ea17abd41..52dff4e2627a 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features-hsa.ll
@@ -647,15 +647,15 @@ attributes #1 = { nounwind }
; AKF_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-stack-objects" }
;.
; ATTRIBUTOR_HSA: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
-; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR10]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_HSA: attributes #[[ATTR11]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
index c93a6a4f797d..31230b47baeb 100644
--- a/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
+++ b/llvm/test/CodeGen/AMDGPU/annotate-kernel-features.ll
@@ -418,13 +418,13 @@ attributes #1 = { nounwind }
; AKF_CHECK: attributes #[[ATTR1]] = { nounwind }
;.
; ATTRIBUTOR_CHECK: attributes #[[ATTR0:[0-9]+]] = { nounwind readnone speculatable willreturn }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
-; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR1]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR2]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR3]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR4]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR5]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR6]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR7]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR8]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_CHECK: attributes #[[ATTR9]] = { nounwind "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workitem-id-x" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
index 82d879d76931..162fdbba4bfa 100644
--- a/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/call-preserved-registers.ll
@@ -1,7 +1,7 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -enable-ipra=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MUBUF %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -enable-ipra=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MUBUF %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-ipra=0 -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,MUBUF %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-ipra=0 -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-ipra=0 -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
declare hidden void @external_void_func_void() #3
diff --git a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
index e357b5adc8d4..918ef1c6fd42 100644
--- a/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
+++ b/llvm/test/CodeGen/AMDGPU/callee-frame-setup.ll
@@ -1,6 +1,6 @@
; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,MUBUF %s
; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,MUBUF %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-enable-flat-scratch < %s | FileCheck -enable-var-scope -check-prefixes=GCN,FLATSCR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch < %s | FileCheck -enable-var-scope -check-prefixes=GCN,FLATSCR %s
; GCN-LABEL: {{^}}callee_no_stack:
; GCN: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
index 7bf107e3f4ff..d15f5c8e614a 100644
--- a/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
+++ b/llvm/test/CodeGen/AMDGPU/chain-hi-to-lo.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=-unaligned-access-mode < %s | FileCheck -check-prefixes=GCN,GFX900 %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=-unaligned-access-mode -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs -mattr=-unaligned-access-mode < %s | FileCheck -check-prefixes=GFX10,GFX10_DEFAULT %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs -mattr=-unaligned-access-mode -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=GFX10,FLATSCR_GFX10 %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -verify-machineinstrs -mattr=-unaligned-access-mode -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GFX10,FLATSCR_GFX10 %s
define <2 x half> @chain_hi_to_lo_private() {
; GFX900-LABEL: chain_hi_to_lo_private:
diff --git a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
index da8fdb8acdce..6c6850030faf 100644
--- a/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/direct-indirect-call.ll
@@ -35,6 +35,6 @@ define amdgpu_kernel void @test_direct_indirect_call() {
ret void
}
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; CHECK: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
index c19dc86afb7b..c68d4362554d 100644
--- a/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
+++ b/llvm/test/CodeGen/AMDGPU/duplicate-attribute-indirect.ll
@@ -42,6 +42,6 @@ attributes #0 = { "amdgpu-no-dispatch-id" }
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-no-dispatch-id" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/fail.llvm.fptrunc.round.ll b/llvm/test/CodeGen/AMDGPU/fail.llvm.fptrunc.round.ll
new file mode 100644
index 000000000000..7bfe11fcd30c
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/fail.llvm.fptrunc.round.ll
@@ -0,0 +1,11 @@
+; RUN: not --crash llc -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s --ignore-case --check-prefix=FAIL
+; RUN: not --crash llc -global-isel -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs -o /dev/null %s 2>&1 | FileCheck %s --ignore-case --check-prefix=FAIL
+
+define amdgpu_gs void @test_fptrunc_round_legalization(double %a, i32 %data0, <4 x i32> %data1, half addrspace(1)* %out) {
+; FAIL: LLVM ERROR: Cannot select
+ %res = call half @llvm.fptrunc.round.f64(double %a, metadata !"round.upward")
+ store half %res, half addrspace(1)* %out, align 4
+ ret void
+}
+
+declare half @llvm.fptrunc.round.f64(double, metadata)
diff --git a/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
index 434b8f742365..3531fcecb0a6 100644
--- a/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
+++ b/llvm/test/CodeGen/AMDGPU/fast-unaligned-load-store.private.ll
@@ -2,9 +2,9 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=-unaligned-scratch-access < %s | FileCheck --check-prefix=GFX7-ALIGNED %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=hawaii -mattr=+unaligned-scratch-access < %s | FileCheck --check-prefix=GFX7-UNALIGNED %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-scratch-access < %s | FileCheck --check-prefix=GFX9 %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-scratch-access -amdgpu-enable-flat-scratch < %s | FileCheck --check-prefix=GFX9-FLASTSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+unaligned-scratch-access -mattr=+enable-flat-scratch < %s | FileCheck --check-prefix=GFX9-FLASTSCR %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -mattr=+unaligned-scratch-access < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -mattr=+unaligned-scratch-access -amdgpu-enable-flat-scratch < %s | FileCheck --check-prefix=GFX10-FLASTSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 -mattr=+unaligned-scratch-access -mattr=+enable-flat-scratch < %s | FileCheck --check-prefix=GFX10-FLASTSCR %s
; Should not merge this to a dword load
define i32 @private_load_2xi16_align2(i16 addrspace(5)* %p) #0 {
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi.mir b/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi.mir
index d957efee38ea..abf8aac8db88 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi.mir
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-fold-fi.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-enable-flat-scratch -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
+# RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=+enable-flat-scratch -run-pass=si-fold-operands -verify-machineinstrs -o - %s | FileCheck -check-prefix=GCN %s
---
name: test_fold_fi_scratch_load_vgpr
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
index 32c55cc51a8b..9edfbefa7fcd 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch-init.ll
@@ -1,5 +1,5 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 --amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLAT_SCR_OPT %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 --mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLAT_SCR_OPT %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1010 --mattr=+architected-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLAT_SCR_ARCH %s
declare void @extern_func() #0
diff --git a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
index abb1204512db..5cf182ba8222 100644
--- a/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat-scratch.ll
@@ -1,9 +1,9 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9 %s
-; RUN: llc -march=amdgcn -mcpu=gfx1030 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9-PAL %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1010 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX10-PAL,GFX1010-PAL %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX10-PAL,GFX1030-PAL %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9 %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10 %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9-PAL %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1010 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX10-PAL,GFX1010-PAL %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GFX10-PAL,GFX1030-PAL %s
define amdgpu_kernel void @zero_init_kernel() {
; GFX9-LABEL: zero_init_kernel:
diff --git a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
index 5ee80c8a238c..e086d504130a 100644
--- a/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
+++ b/llvm/test/CodeGen/AMDGPU/frame-index-elimination.ll
@@ -1,6 +1,6 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=kaveri -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,CI,MUBUF %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX9-MUBUF,MUBUF %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca -amdgpu-sroa=0 -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX9-FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=-promote-alloca,+enable-flat-scratch -amdgpu-sroa=0 -verify-machineinstrs < %s | FileCheck -enable-var-scope -check-prefixes=GCN,GFX9,GFX9-FLATSCR %s
; Test that non-entry function frame indices are expanded properly to
; give an index relative to the scratch wave offset register
diff --git a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
index 989e8bfe7f56..df6e07002e8a 100644
--- a/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
+++ b/llvm/test/CodeGen/AMDGPU/gfx-callable-argument-types.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX9 %s
; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10 %s
-; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1010 -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10-SCRATCH %s
+; RUN: llc -mtriple=amdgcn--amdpal -mcpu=gfx1010 -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefix=GFX10-SCRATCH %s
declare hidden amdgpu_gfx void @external_void_func_i1(i1) #0
declare hidden amdgpu_gfx void @external_void_func_i1_signext(i1 signext) #0
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel-v3.ll
index a8bec1d1012f..0531af841c8f 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel-v3.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel-v3.ll
@@ -50,7 +50,7 @@ define amdgpu_kernel void @test_non_enqueue_kernel_caller(i8 %a) #0
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 32
; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_none
+; CHECK-NEXT: .value_kind: hidden_hostcall_buffer
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 40
; CHECK-NEXT: .size: 8
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel.ll
index ca649e4aa9df..29da063b6de4 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-enqueue-kernel.ll
@@ -26,6 +26,9 @@
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
+; CHECK-NEXT: - Size: 8
+; CHECK-NEXT: Align: 8
+; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
; CHECK-NOT: ValueKind: HiddenDefaultQueue
; CHECK-NOT: ValueKind: HiddenCompletionAction
define amdgpu_kernel void @test_non_enqueue_kernel_caller(i8 %a) #0
@@ -56,7 +59,7 @@ define amdgpu_kernel void @test_non_enqueue_kernel_caller(i8 %a) #0
; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenNone
+; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
; CHECK-NEXT: AddrSpaceQual: Global
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v3.ll
index 59b48e9ff553..2b265c83d61b 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v3.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v3.ll
@@ -171,7 +171,7 @@ entry:
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 48
; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_none
+; CHECK-NEXT: .value_kind: hidden_hostcall_buffer
; CHECK: .name: test32
; CHECK: .symbol: test32.kd
define amdgpu_kernel void @test32(
@@ -214,7 +214,7 @@ entry:
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 48
; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_none
+; CHECK-NEXT: .value_kind: hidden_hostcall_buffer
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 56
; CHECK-NEXT: .size: 8
@@ -265,7 +265,7 @@ entry:
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 48
; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_none
+; CHECK-NEXT: .value_kind: hidden_hostcall_buffer
; CHECK-NEXT: - .address_space: global
; CHECK-NEXT: .offset: 56
; CHECK-NEXT: .size: 8
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v5.ll
index 580fecd906b9..85bbfcc929c3 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v5.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args-v5.ll
@@ -112,10 +112,8 @@ entry:
ret void
}
-!llvm.module.flags = !{!0}
!llvm.printf.fmts = !{!1, !2}
-!0 = !{i32 1, !"amdgpu_hostcall", i32 1}
!1 = !{!"1:1:4:%d\5Cn"}
!2 = !{!"2:1:8:%g\5Cn"}
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args.ll
index b6f73d8aeb69..cae375da0e08 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hidden-args.ll
@@ -177,7 +177,7 @@ entry:
; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenNone
+; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
; CHECK-NEXT: AddrSpaceQual: Global
; CHECK-NEXT: CodeProps:
define amdgpu_kernel void @test32(
@@ -221,7 +221,7 @@ entry:
; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenNone
+; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
; CHECK-NEXT: AddrSpaceQual: Global
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
@@ -273,7 +273,7 @@ entry:
; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenNone
+; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
; CHECK-NEXT: AddrSpaceQual: Global
; CHECK-NEXT: - Size: 8
; CHECK-NEXT: Align: 8
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent-v3.ll
deleted file mode 100644
index 54662841e5bf..000000000000
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent-v3.ll
+++ /dev/null
@@ -1,51 +0,0 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 -amdgpu-dump-hsa-metadata -amdgpu-verify-hsa-metadata -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
-
-; CHECK: ---
-; CHECK: amdhsa.kernels:
-; CHECK: - .args:
-; CHECK-NEXT: - .name: a
-; CHECK-NEXT: .offset: 0
-; CHECK-NEXT: .size: 1
-; CHECK-NEXT: .type_name: char
-; CHECK-NEXT: .value_kind: by_value
-; CHECK-NEXT: - .offset: 8
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_x
-; CHECK-NEXT: - .offset: 16
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_y
-; CHECK-NEXT: - .offset: 24
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_z
-
-; CHECK-NOT: .value_kind: hidden_hostcall_buffer
-
-; CHECK: .language: OpenCL C
-; CHECK-NEXT: .language_version:
-; CHECK-NEXT: - 2
-; CHECK-NEXT: - 0
-; CHECK: .name: test_kernel
-; CHECK: .symbol: test_kernel.kd
-
-define amdgpu_kernel void @test_kernel(i8 %a) #0
- !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
- !kernel_arg_base_type !3 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK: amdhsa.version:
-; CHECK-NEXT: - 1
-; CHECK-NEXT: - 0
-
-attributes #0 = { optnone noinline "amdgpu-implicitarg-num-bytes"="48" }
-
-!1 = !{i32 0}
-!2 = !{!"none"}
-!3 = !{!"char"}
-!4 = !{!""}
-
-!opencl.ocl.version = !{!90}
-!90 = !{i32 2, i32 0}
-
-; PARSER: AMDGPU HSA Metadata Parser Test: PASS
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent.ll
deleted file mode 100644
index 39fe68b6594a..000000000000
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-absent.ll
+++ /dev/null
@@ -1,48 +0,0 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=2 -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=2 -mcpu=gfx900 -amdgpu-dump-hsa-metadata -amdgpu-verify-hsa-metadata -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
-
-; CHECK: ---
-; CHECK: Version: [ 1, 0 ]
-; CHECK: Kernels:
-
-; CHECK: - Name: test_kernel
-; CHECK-NEXT: SymbolName: 'test_kernel@kd'
-; CHECK-NEXT: Language: OpenCL C
-; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
-; CHECK-NEXT: Args:
-; CHECK-NEXT: - Name: a
-; CHECK-NEXT: TypeName: char
-; CHECK-NEXT: Size: 1
-; CHECK-NEXT: Align: 1
-; CHECK-NEXT: ValueKind: ByValue
-; CHECK-NEXT: AccQual: Default
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
-; CHECK-NOT: ValueKind: HiddenHostcallBuffer
-; CHECK-NOT: ValueKind: HiddenDefaultQueue
-; CHECK-NOT: ValueKind: HiddenCompletionAction
-
-define amdgpu_kernel void @test_kernel(i8 %a) #0
- !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
- !kernel_arg_base_type !3 !kernel_arg_type_qual !4 {
- ret void
-}
-
-attributes #0 = { optnone noinline "amdgpu-implicitarg-num-bytes"="48" }
-
-!1 = !{i32 0}
-!2 = !{!"none"}
-!3 = !{!"char"}
-!4 = !{!""}
-
-!opencl.ocl.version = !{!90}
-!90 = !{i32 2, i32 0}
-
-; PARSER: AMDGPU HSA Metadata Parser Test: PASS
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3-asan.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3-asan.ll
index 5b63af45bbe9..e11ed4a9c7c7 100644
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3-asan.ll
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3-asan.ll
@@ -48,7 +48,4 @@ attributes #0 = { sanitize_address "amdgpu-implicitarg-num-bytes"="48" }
!opencl.ocl.version = !{!90}
!90 = !{i32 2, i32 0}
-!llvm.module.flags = !{!0}
-!0 = !{i32 4, !"amdgpu_hostcall", i32 1}
-
; CHECK: AMDGPU HSA Metadata Parser Test: PASS
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3.ll
deleted file mode 100644
index 80a04660095f..000000000000
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present-v3.ll
+++ /dev/null
@@ -1,55 +0,0 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 -amdgpu-dump-hsa-metadata -amdgpu-verify-hsa-metadata -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
-
-; CHECK: ---
-; CHECK: amdhsa.kernels:
-; CHECK: - .args:
-; CHECK-NEXT: - .name: a
-; CHECK-NEXT: .offset: 0
-; CHECK-NEXT: .size: 1
-; CHECK-NEXT: .type_name: char
-; CHECK-NEXT: .value_kind: by_value
-; CHECK-NEXT: - .offset: 8
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_x
-; CHECK-NEXT: - .offset: 16
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_y
-; CHECK-NEXT: - .offset: 24
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_global_offset_z
-; CHECK-NEXT: - .address_space: global
-; CHECK-NEXT: .offset: 32
-; CHECK-NEXT: .size: 8
-; CHECK-NEXT: .value_kind: hidden_hostcall_buffer
-; CHECK: .language: OpenCL C
-; CHECK-NEXT: .language_version:
-; CHECK-NEXT: - 2
-; CHECK-NEXT: - 0
-; CHECK: .name: test_kernel
-; CHECK: .symbol: test_kernel.kd
-
-define amdgpu_kernel void @test_kernel(i8 %a) #0
- !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
- !kernel_arg_base_type !3 !kernel_arg_type_qual !4 {
- ret void
-}
-
-; CHECK: amdhsa.version:
-; CHECK-NEXT: - 1
-; CHECK-NEXT: - 0
-
-attributes #0 = { optnone noinline "amdgpu-implicitarg-num-bytes"="48" }
-
-!1 = !{i32 0}
-!2 = !{!"none"}
-!3 = !{!"char"}
-!4 = !{!""}
-
-!opencl.ocl.version = !{!90}
-!90 = !{i32 2, i32 0}
-
-!llvm.module.flags = !{!0}
-!0 = !{i32 1, !"amdgpu_hostcall", i32 1}
-
-; PARSER: AMDGPU HSA Metadata Parser Test: PASS
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present.ll
deleted file mode 100644
index 87b6ecb6a81c..000000000000
--- a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-present.ll
+++ /dev/null
@@ -1,53 +0,0 @@
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=2 -mcpu=gfx900 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa --amdhsa-code-object-version=2 -mcpu=gfx900 -amdgpu-dump-hsa-metadata -amdgpu-verify-hsa-metadata -filetype=obj -o - < %s 2>&1 | FileCheck --check-prefix=PARSER %s
-
-; CHECK: ---
-; CHECK: Version: [ 1, 0 ]
-; CHECK: Kernels:
-
-; CHECK: - Name: test_kernel
-; CHECK-NEXT: SymbolName: 'test_kernel@kd'
-; CHECK-NEXT: Language: OpenCL C
-; CHECK-NEXT: LanguageVersion: [ 2, 0 ]
-; CHECK-NEXT: Args:
-; CHECK-NEXT: - Name: a
-; CHECK-NEXT: TypeName: char
-; CHECK-NEXT: Size: 1
-; CHECK-NEXT: Align: 1
-; CHECK-NEXT: ValueKind: ByValue
-; CHECK-NEXT: AccQual: Default
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetX
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetY
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenGlobalOffsetZ
-; CHECK-NEXT: - Size: 8
-; CHECK-NEXT: Align: 8
-; CHECK-NEXT: ValueKind: HiddenHostcallBuffer
-; CHECK-NEXT: AddrSpaceQual: Global
-; CHECK-NOT: ValueKind: HiddenDefaultQueue
-; CHECK-NOT: ValueKind: HiddenCompletionAction
-
-declare <2 x i64> @__ockl_hostcall_internal(i8*, i32, i64, i64, i64, i64, i64, i64, i64, i64)
-
-define amdgpu_kernel void @test_kernel(i8 %a) #0
- !kernel_arg_addr_space !1 !kernel_arg_access_qual !2 !kernel_arg_type !3
- !kernel_arg_base_type !3 !kernel_arg_type_qual !4 {
- ret void
-}
-
-attributes #0 = { optnone noinline "amdgpu-implicitarg-num-bytes"="48" }
-
-!1 = !{i32 0}
-!2 = !{!"none"}
-!3 = !{!"char"}
-!4 = !{!""}
-
-!opencl.ocl.version = !{!90}
-!90 = !{i32 2, i32 0}
-
-; PARSER: AMDGPU HSA Metadata Parser Test: PASS
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
new file mode 100644
index 000000000000..734ad53e9be0
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v3.ll
@@ -0,0 +1,303 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=3 < %s | FileCheck --check-prefix=CHECK %s
+
+declare void @function1()
+
+declare void @function2() #0
+
+; Function Attrs: noinline
+define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #4 {
+ store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+ ret void
+}
+
+; Function Attrs: noinline
+define void @function4(i64 %arg, i64* %a) #4 {
+ store i64 %arg, i64* %a
+ ret void
+}
+
+; Function Attrs: noinline
+define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #4 {
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 8
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %sink
+ ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+
+; CHECK: amdhsa.kernels:
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel10
+define amdgpu_kernel void @test_kernel10(i8* %a) #2 {
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Call to an extern function
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel20
+define amdgpu_kernel void @test_kernel20(i8* %a) #2 {
+ call void @function1()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Explicit attribute on kernel
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel21
+define amdgpu_kernel void @test_kernel21(i8* %a) #3 {
+ call void @function1()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Explicit attribute on extern callee
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel22
+define amdgpu_kernel void @test_kernel22(i8* %a) #2 {
+ call void @function2()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Access more bytes than the pointer size
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel30
+define amdgpu_kernel void @test_kernel30(i128* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
+ %x = load i128, i128 addrspace(4)* %cast
+ store i128 %x, i128* %a
+ ret void
+}
+
+; Typical load of hostcall buffer pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel40
+define amdgpu_kernel void @test_kernel40(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Typical usage, overridden by explicit attribute on kernel
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel41
+define amdgpu_kernel void @test_kernel41(i64* %a) #3 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Access to implicit arg before the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel42
+define amdgpu_kernel void @test_kernel42(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Access to implicit arg after the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel43
+define amdgpu_kernel void @test_kernel43(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Accessing a byte just before the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel44
+define amdgpu_kernel void @test_kernel44(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 23
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte inside the hostcall pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel45
+define amdgpu_kernel void @test_kernel45(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte inside the hostcall pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel46
+define amdgpu_kernel void @test_kernel46(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 31
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte just after the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel47
+define amdgpu_kernel void @test_kernel47(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 32
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Access with an unknown offset
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel50
+define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Multiple geps reaching the hostcall pointer argument.
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel51
+define amdgpu_kernel void @test_kernel51(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 8
+ %x = load i8, i8 addrspace(4)* %gep2, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Multiple geps not reaching the hostcall pointer argument.
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel52
+define amdgpu_kernel void @test_kernel52(i8* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
+ %x = load i8, i8 addrspace(4)* %gep2, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Hostcall pointer used inside a function call
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel60
+define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 24
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ call void @function4(i64 %x, i64* %a)
+ ret void
+}
+
+; Hostcall pointer retrieved inside a function call; chain of geps
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel61
+define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ call void @function5(i8 addrspace(4)* %gep, i64* %a)
+ ret void
+}
+
+; Pointer captured
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel70
+define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+ ret void
+}
+
+; Pointer captured inside function call
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel71
+define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+ ret void
+}
+
+; Ineffective pointer capture
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel72
+define amdgpu_kernel void @test_kernel72() #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+ ret void
+}
+
+attributes #0 = { "amdgpu-no-hostcall-ptr" }
+attributes #1 = { nounwind readnone speculatable willreturn }
+attributes #2 = { "amdgpu-implicitarg-num-bytes"="48" }
+attributes #3 = { "amdgpu-implicitarg-num-bytes"="48" "amdgpu-no-hostcall-ptr" }
+attributes #4 = { noinline }
diff --git a/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
new file mode 100644
index 000000000000..a832ca1d60aa
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/hsa-metadata-hostcall-v5.ll
@@ -0,0 +1,301 @@
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=5 -filetype=obj -o - < %s | llvm-readelf --notes - | FileCheck %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 --amdhsa-code-object-version=5 < %s | FileCheck --check-prefix=CHECK %s
+
+declare void @function1()
+
+declare void @function2() #0
+
+; Function Attrs: noinline
+define void @function3(i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink) #2 {
+ store i8 addrspace(4)* %argptr, i8 addrspace(4)* addrspace(1)* %sink, align 8
+ ret void
+}
+
+; Function Attrs: noinline
+define void @function4(i64 %arg, i64* %a) #2 {
+ store i64 %arg, i64* %a
+ ret void
+}
+
+; Function Attrs: noinline
+define void @function5(i8 addrspace(4)* %ptr, i64* %sink) #2 {
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 64
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %sink
+ ret void
+}
+
+; Function Attrs: nounwind readnone speculatable willreturn
+declare align 4 i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr() #1
+
+; CHECK: amdhsa.kernels:
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel10
+define amdgpu_kernel void @test_kernel10(i8* %a) {
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Call to an extern function
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel20
+define amdgpu_kernel void @test_kernel20(i8* %a) {
+ call void @function1()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Explicit attribute on kernel
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel21
+define amdgpu_kernel void @test_kernel21(i8* %a) #0 {
+ call void @function1()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Explicit attribute on extern callee
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel22
+define amdgpu_kernel void @test_kernel22(i8* %a) {
+ call void @function2()
+ store i8 3, i8* %a, align 1
+ ret void
+}
+
+; Access more bytes than the pointer size
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel30
+define amdgpu_kernel void @test_kernel30(i128* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 72
+ %cast = bitcast i8 addrspace(4)* %gep to i128 addrspace(4)*
+ %x = load i128, i128 addrspace(4)* %cast
+ store i128 %x, i128* %a
+ ret void
+}
+
+; Typical load of hostcall buffer pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel40
+define amdgpu_kernel void @test_kernel40(i64* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Typical usage, overridden by explicit attribute on kernel
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel41
+define amdgpu_kernel void @test_kernel41(i64* %a) #0 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Access to implicit arg before the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel42
+define amdgpu_kernel void @test_kernel42(i64* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 72
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Access to implicit arg after the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel43
+define amdgpu_kernel void @test_kernel43(i64* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ store i64 %x, i64* %a
+ ret void
+}
+
+; Accessing a byte just before the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel44
+define amdgpu_kernel void @test_kernel44(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 79
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte inside the hostcall pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel45
+define amdgpu_kernel void @test_kernel45(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte inside the hostcall pointer
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel46
+define amdgpu_kernel void @test_kernel46(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 87
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Accessing a byte just after the hostcall pointer
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel47
+define amdgpu_kernel void @test_kernel47(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 88
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Access with an unknown offset
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel50
+define amdgpu_kernel void @test_kernel50(i8* %a, i32 %b) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 %b
+ %x = load i8, i8 addrspace(4)* %gep, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Multiple geps reaching the hostcall pointer argument.
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel51
+define amdgpu_kernel void @test_kernel51(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 64
+ %x = load i8, i8 addrspace(4)* %gep2, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Multiple geps not reaching the hostcall pointer argument.
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel52
+define amdgpu_kernel void @test_kernel52(i8* %a) {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep1 = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ %gep2 = getelementptr inbounds i8, i8 addrspace(4)* %gep1, i64 16
+ %x = load i8, i8 addrspace(4)* %gep2, align 1
+ store i8 %x, i8* %a, align 1
+ ret void
+}
+
+; Hostcall pointer used inside a function call
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel60
+define amdgpu_kernel void @test_kernel60(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 80
+ %cast = bitcast i8 addrspace(4)* %gep to i64 addrspace(4)*
+ %x = load i64, i64 addrspace(4)* %cast
+ call void @function4(i64 %x, i64* %a)
+ ret void
+}
+
+; Hostcall pointer retrieved inside a function call; chain of geps
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel61
+define amdgpu_kernel void @test_kernel61(i64* %a) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i64 16
+ call void @function5(i8 addrspace(4)* %gep, i64* %a)
+ ret void
+}
+
+; Pointer captured
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel70
+define amdgpu_kernel void @test_kernel70(i8 addrspace(4)* addrspace(1)* %sink) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink, align 8
+ ret void
+}
+
+; Pointer captured inside function call
+
+; CHECK: - .args:
+; CHECK: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel71
+define amdgpu_kernel void @test_kernel71(i8 addrspace(4)* addrspace(1)* %sink) #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ call void @function3(i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* %sink)
+ ret void
+}
+
+; Ineffective pointer capture
+
+; CHECK: - .args:
+; CHECK-NOT: hidden_hostcall_buffer
+; CHECK-LABEL: .name: test_kernel72
+define amdgpu_kernel void @test_kernel72() #2 {
+ %ptr = tail call i8 addrspace(4)* @llvm.amdgcn.implicitarg.ptr()
+ %gep = getelementptr inbounds i8, i8 addrspace(4)* %ptr, i32 42
+ store i8 addrspace(4)* %gep, i8 addrspace(4)* addrspace(1)* undef, align 8
+ ret void
+}
+
+attributes #0 = { "amdgpu-no-hostcall-ptr" }
+attributes #1 = { nounwind readnone speculatable willreturn }
+attributes #2 = { noinline }
diff --git a/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll
new file mode 100644
index 000000000000..b4787f3eefe5
--- /dev/null
+++ b/llvm/test/CodeGen/AMDGPU/llvm.fptrunc.round.ll
@@ -0,0 +1,52 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -global-isel -march=amdgcn -mcpu=gfx1030 -verify-machineinstrs < %s | FileCheck %s
+
+define amdgpu_gs void @test_fptrunc_round_upward(float %a, i32 %data0, <4 x i32> %data1, half addrspace(1)* %out) {
+; CHECK-LABEL: test_fptrunc_round_upward:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1
+; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CHECK-NEXT: global_store_short v[6:7], v0, off
+; CHECK-NEXT: s_endpgm
+ %res = call half @llvm.fptrunc.round(float %a, metadata !"round.upward")
+ store half %res, half addrspace(1)* %out, align 4
+ ret void
+}
+
+define amdgpu_gs void @test_fptrunc_round_downward(float %a, i32 %data0, <4 x i32> %data1, half addrspace(1)* %out) {
+; CHECK-LABEL: test_fptrunc_round_downward:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 1
+; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CHECK-NEXT: global_store_short v[6:7], v0, off
+; CHECK-NEXT: s_endpgm
+ %res = call half @llvm.fptrunc.round(float %a, metadata !"round.downward")
+ store half %res, half addrspace(1)* %out, align 4
+ ret void
+}
+
+define amdgpu_gs void @test_fptrunc_round_upward_multiple_calls(float %a, float %b, i32 %data0, <4 x i32> %data1, half addrspace(1)* %out) {
+; CHECK-LABEL: test_fptrunc_round_upward_multiple_calls:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 1), 1
+; CHECK-NEXT: v_cvt_f16_f32_e32 v0, v0
+; CHECK-NEXT: v_cvt_f16_f32_e32 v2, v1
+; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 2, 2), 2
+; CHECK-NEXT: v_cvt_f16_f32_e32 v1, v1
+; CHECK-NEXT: s_setreg_imm32_b32 hwreg(HW_REG_MODE, 3, 1), 0
+; CHECK-NEXT: v_add_f16_e32 v0, v0, v2
+; CHECK-NEXT: v_add_f16_e32 v0, v1, v0
+; CHECK-NEXT: global_store_short v[7:8], v0, off
+; CHECK-NEXT: s_endpgm
+ %res1 = call half @llvm.fptrunc.round(float %a, metadata !"round.upward")
+ %res2 = call half @llvm.fptrunc.round(float %b, metadata !"round.upward")
+ %res3 = call half @llvm.fptrunc.round(float %b, metadata !"round.downward")
+ %res4 = fadd half %res1, %res2
+ %res5 = fadd half %res3, %res4
+ store half %res5, half addrspace(1)* %out, align 4
+ ret void
+}
+
+declare half @llvm.fptrunc.round(float, metadata)
diff --git a/llvm/test/CodeGen/AMDGPU/load-hi16.ll b/llvm/test/CodeGen/AMDGPU/load-hi16.ll
index 107476b82220..0072ab932302 100644
--- a/llvm/test/CodeGen/AMDGPU/load-hi16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-hi16.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,GFX900-MUBUF %s
; RUN: llc -march=amdgcn -mcpu=gfx906 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX906,NO-D16-HI %s
; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX803,NO-D16-HI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,GFX900-FLATSCR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,GFX900,GFX900-FLATSCR %s
; GCN-LABEL: {{^}}load_local_lo_hi_v2i16_multi_use_lo:
; GFX900: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/load-lo16.ll b/llvm/test/CodeGen/AMDGPU/load-lo16.ll
index 70514a21e350..81cedae07a20 100644
--- a/llvm/test/CodeGen/AMDGPU/load-lo16.ll
+++ b/llvm/test/CodeGen/AMDGPU/load-lo16.ll
@@ -2,7 +2,7 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -check-prefixes=GFX900,GFX900-MUBUF %s
; RUN: llc -march=amdgcn -mcpu=gfx906 -amdgpu-sroa=0 -mattr=-promote-alloca,+sram-ecc -verify-machineinstrs < %s | FileCheck --check-prefix=GFX906 %s
; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck --check-prefix=GFX803 %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs --amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=GFX900,GFX900-FLATSCR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs --mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GFX900,GFX900-FLATSCR %s
define <2 x i16> @load_local_lo_v2i16_undeflo(i16 addrspace(3)* %in) #0 {
; GFX900-LABEL: load_local_lo_v2i16_undeflo:
diff --git a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
index c6b9f8f28412..776d4ed37e4c 100644
--- a/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
+++ b/llvm/test/CodeGen/AMDGPU/local-stack-alloc-block-sp-reference.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 < %s | FileCheck --check-prefix=MUBUF %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 --amdgpu-enable-flat-scratch < %s | FileCheck --check-prefix=FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx906 --mattr=+enable-flat-scratch < %s | FileCheck --check-prefix=FLATSCR %s
; Make sure the correct frame offset is used with the local
; frame area.
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll b/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
index 4e8b1ab57c16..61579ba3c221 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-fixed-align.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 < %s | FileCheck %s -check-prefix=MUBUF
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -amdgpu-enable-flat-scratch < %s | FileCheck %s -check-prefix=FLATSCR
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -mattr=+enable-flat-scratch < %s | FileCheck %s -check-prefix=FLATSCR
; Make sure there's no assertion from passing a 0 alignment value
define void @memcpy_fixed_align(i8 addrspace(5)* %dst, i8 addrspace(1)* %src) {
diff --git a/llvm/test/CodeGen/AMDGPU/memory_clause.ll b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
index 75139bfa1cf1..6416bb3e025e 100644
--- a/llvm/test/CodeGen/AMDGPU/memory_clause.ll
+++ b/llvm/test/CodeGen/AMDGPU/memory_clause.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -march=amdgcn -mcpu=gfx902 -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s
-; RUN: llc -march=amdgcn -mcpu=gfx1030 -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SCRATCH %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefix=GCN-SCRATCH %s
define amdgpu_kernel void @vector_clause(<4 x i32> addrspace(1)* noalias nocapture readonly %arg, <4 x i32> addrspace(1)* noalias nocapture %arg1) {
; GCN-LABEL: vector_clause:
diff --git a/llvm/test/CodeGen/AMDGPU/multi-dword-vgpr-spill.ll b/llvm/test/CodeGen/AMDGPU/multi-dword-vgpr-spill.ll
index 6badf1f1886a..73d36325a88e 100644
--- a/llvm/test/CodeGen/AMDGPU/multi-dword-vgpr-spill.ll
+++ b/llvm/test/CodeGen/AMDGPU/multi-dword-vgpr-spill.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck %s -check-prefixes=GCN,MUBUF
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 -amdgpu-enable-flat-scratch < %s | FileCheck %s -check-prefixes=GCN,FLATSCR
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 -mattr=+enable-flat-scratch < %s | FileCheck %s -check-prefixes=GCN,FLATSCR
; GCN-LABEL: spill_v2i32:
; MUBUF-DAG: buffer_store_dword v{{.*}} offset:16 ; 4-byte Folded Spill
diff --git a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
index 50e2c8ce349d..d626d8477eda 100644
--- a/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
+++ b/llvm/test/CodeGen/AMDGPU/non-entry-alloca.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs < %s | FileCheck -check-prefixes=DEFAULTSIZE,MUBUF %s
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -amdgpu-assume-dynamic-stack-object-size=1024 < %s | FileCheck -check-prefixes=ASSUME1024,MUBUF %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=DEFAULTSIZE,FLATSCR %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-assume-dynamic-stack-object-size=1024 < %s | FileCheck -check-prefixes=ASSUME1024,FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=DEFAULTSIZE,FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-assume-dynamic-stack-object-size=1024 < %s | FileCheck -check-prefixes=ASSUME1024,FLATSCR %s
; FIXME: Generated test checks do not check metadata at the end of the
; function, so this also includes manually added checks.
diff --git a/llvm/test/CodeGen/AMDGPU/pei-build-av-spill.mir b/llvm/test/CodeGen/AMDGPU/pei-build-av-spill.mir
index 348c665332f8..1ae87e4697ed 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-build-av-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-build-av-spill.mir
@@ -1,12 +1,12 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-V2A %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx908 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-GFX90A %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-GFX90A-V2A %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A-V2A %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A-V2A %s
---
name: test_spill_av_v1
diff --git a/llvm/test/CodeGen/AMDGPU/pei-build-spill-partial-agpr.mir b/llvm/test/CodeGen/AMDGPU/pei-build-spill-partial-agpr.mir
index 40f449bcd807..3eaa44691ab9 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-build-spill-partial-agpr.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-build-spill-partial-agpr.mir
@@ -1,6 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -run-pass=si-lower-sgpr-spills,prologepilog -o - %s | FileCheck -check-prefix=MUBUF-V2A %s
-# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-enable-flat-scratch -run-pass=si-lower-sgpr-spills,prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
+# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -mattr=+enable-flat-scratch -run-pass=si-lower-sgpr-spills,prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
--- |
diff --git a/llvm/test/CodeGen/AMDGPU/pei-build-spill.mir b/llvm/test/CodeGen/AMDGPU/pei-build-spill.mir
index afab15b53658..b24f695d8fca 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-build-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-build-spill.mir
@@ -1,12 +1,12 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF %s
# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-V2A %s
-# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR %s
-# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
+# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR %s
+# RUN: llc -march=amdgcn -mcpu=gfx908 -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-V2A %s
# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-GFX90A %s
# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=MUBUF-GFX90A-V2A %s
-# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A %s
-# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -amdgpu-enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A-V2A %s
+# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=0 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A %s
+# RUN: llc -march=amdgcn -mcpu=gfx90a -verify-machineinstrs -mattr=+enable-flat-scratch -amdgpu-spill-vgpr-to-agpr=1 -run-pass=prologepilog -o - %s | FileCheck -check-prefix=FLATSCR-GFX90A-V2A %s
---
name: test_spill_v1
diff --git a/llvm/test/CodeGen/AMDGPU/pei-scavenge-sgpr-gfx9.mir b/llvm/test/CodeGen/AMDGPU/pei-scavenge-sgpr-gfx9.mir
index 2562d20c8a92..1771c74e6d8f 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-scavenge-sgpr-gfx9.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-scavenge-sgpr-gfx9.mir
@@ -1,6 +1,6 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck --check-prefix=MUBUF %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -amdgpu-enable-flat-scratch %s -o - | FileCheck --check-prefix=FLATSCR %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -mattr=+enable-flat-scratch %s -o - | FileCheck --check-prefix=FLATSCR %s
# Test what happens when an SGPR is unavailable for the unused add. The non-inline constant needs to be folded into the add instruction and not materialized in a register.
diff --git a/llvm/test/CodeGen/AMDGPU/pei-scavenge-vgpr-spill.mir b/llvm/test/CodeGen/AMDGPU/pei-scavenge-vgpr-spill.mir
index 11a61144bac9..d2ea39883e68 100644
--- a/llvm/test/CodeGen/AMDGPU/pei-scavenge-vgpr-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/pei-scavenge-vgpr-spill.mir
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX8 %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GFX9 %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -amdgpu-enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX9-FLATSCR %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -mattr=+enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX9-FLATSCR %s
# Test case where spilling a VGPR to an emergency slot is needed during frame index elimination.
diff --git a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
index 4f1bd890ea60..20e46a2ec732 100644
--- a/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
+++ b/llvm/test/CodeGen/AMDGPU/propagate-flat-work-group-size.ll
@@ -202,13 +202,13 @@ attributes #5 = { "amdgpu-flat-work-group-size"="128,512" }
attributes #6 = { "amdgpu-flat-work-group-size"="512,512" }
attributes #7 = { "amdgpu-flat-work-group-size"="64,256" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-flat-work-group-size"="1,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-flat-work-group-size"="64,128" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-flat-work-group-size"="128,512" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR3]] = { "amdgpu-flat-work-group-size"="64,64" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR4]] = { "amdgpu-flat-work-group-size"="128,128" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR5]] = { "amdgpu-flat-work-group-size"="512,512" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR6]] = { "amdgpu-flat-work-group-size"="64,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR7]] = { "amdgpu-flat-work-group-size"="128,256" "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR8]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
index 90cd56053f8d..9d8397f640ac 100644
--- a/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
+++ b/llvm/test/CodeGen/AMDGPU/scratch-simple.ll
@@ -4,10 +4,10 @@
; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -filetype=obj -amdgpu-use-divergent-register-indexing < %s | llvm-readobj -r - | FileCheck --check-prefix=RELS %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,MUBUF,GFX10_W32-MUBUF,GFX9_10-MUBUF %s
; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1010 -mattr=-flat-for-global,+wavefrontsize64 -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,MUBUF,GFX10_W64-MUBUF,GFX9_10-MUBUF %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX9-FLATSCR %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1030 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX10-FLATSCR %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX9-FLATSCR-PAL %s
-; RUN: llc -march=amdgcn -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=-flat-for-global -amdgpu-use-divergent-register-indexing -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX10-FLATSCR-PAL %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx900 -mattr=-flat-for-global,+enable-flat-scratch -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX9-FLATSCR %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -mcpu=gfx1030 -mattr=-flat-for-global,+enable-flat-scratch -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX10-FLATSCR %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn--amdpal -mcpu=gfx900 -mattr=-flat-for-global,+enable-flat-scratch -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX9-FLATSCR-PAL %s
+; RUN: llc -march=amdgcn -mtriple=amdgcn--amdpal -mcpu=gfx1030 -mattr=-flat-for-global,+enable-flat-scratch -amdgpu-use-divergent-register-indexing -verify-machineinstrs < %s | FileCheck --check-prefixes=GCN,GFX9_10,FLATSCR,GFX10-FLATSCR-PAL %s
; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD0
; RELS: R_AMDGPU_ABS32_LO SCRATCH_RSRC_DWORD1
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-spill.mir b/llvm/test/CodeGen/AMDGPU/sgpr-spill.mir
index ed3b1b0006e9..aa9363d58a34 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-spill.mir
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-spill.mir
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GCN64-MUBUF %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -mattr=+wavefrontsize32,-wavefrontsize64 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GCN32-MUBUF %s
-# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -amdgpu-enable-flat-scratch -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GCN64-FLATSCR %s
+# RUN: llc -mtriple=amdgcn -mcpu=gfx900 -verify-machineinstrs -mattr=+enable-flat-scratch -run-pass=prologepilog %s -o - | FileCheck -check-prefix=GCN64-FLATSCR %s
# RUN: llc -mtriple=amdgcn -mcpu=gfx1010 -filetype=obj -verify-machineinstrs -start-before=prologepilog %s -o /dev/null
# Check that we do not crash when emitting ISA
diff --git a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
index f4eaa6082376..3786e8ef2ad4 100644
--- a/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
+++ b/llvm/test/CodeGen/AMDGPU/simple-indirect-call.ll
@@ -73,6 +73,6 @@ define amdgpu_kernel void @test_simple_indirect_call() {
;.
; AKF_GCN: attributes #[[ATTR0]] = { "amdgpu-calls" "amdgpu-stack-objects" }
;.
-; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; ATTRIBUTOR_GCN: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
; ATTRIBUTOR_GCN: attributes #[[ATTR1]] = { "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll b/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll
index 24156cdb031e..8077a0b6adfb 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-offset-calculation.ll
@@ -1,5 +1,5 @@
; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 < %s | FileCheck -check-prefixes=GCN,MUBUF %s
-; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
+; RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -stress-regalloc=8 -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=GCN,FLATSCR %s
; Test that the VGPR spiller correctly switches to SGPR offsets when the
; instruction offset field would overflow, and that it accounts for memory
diff --git a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
index 31af055ad015..e0fc1e19b167 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
+++ b/llvm/test/CodeGen/AMDGPU/spill-scavenge-offset.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -mcpu=verde -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX6 %s
; RUN: llc -sgpr-regalloc=basic -vgpr-regalloc=basic -march=amdgcn -mcpu=tonga -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck --check-prefix=CHECK %s
-; RUN: llc -march=amdgcn -mattr=-xnack -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=CHECK,GFX9-FLATSCR,FLATSCR %s
-; RUN: llc -march=amdgcn -mcpu=gfx1030 -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 -amdgpu-enable-flat-scratch < %s | FileCheck -check-prefixes=CHECK,GFX10-FLATSCR,FLATSCR %s
+; RUN: llc -march=amdgcn -mattr=-xnack,+enable-flat-scratch -mcpu=gfx900 -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 < %s | FileCheck -check-prefixes=CHECK,GFX9-FLATSCR,FLATSCR %s
+; RUN: llc -march=amdgcn -mcpu=gfx1030 -enable-misched=0 -post-RA-scheduler=0 -amdgpu-spill-sgpr-to-vgpr=0 -mattr=+enable-flat-scratch < %s | FileCheck -check-prefixes=CHECK,GFX10-FLATSCR,FLATSCR %s
;
; There is something about Tonga that causes this test to spend a lot of time
; in the default register allocator.
diff --git a/llvm/test/CodeGen/AMDGPU/spill-to-agpr-partial.mir b/llvm/test/CodeGen/AMDGPU/spill-to-agpr-partial.mir
index 43ef62ec553e..5199fb0eefed 100644
--- a/llvm/test/CodeGen/AMDGPU/spill-to-agpr-partial.mir
+++ b/llvm/test/CodeGen/AMDGPU/spill-to-agpr-partial.mir
@@ -1,5 +1,5 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-enable-flat-scratch -verify-machineinstrs -run-pass=prologepilog -o - %s | FileCheck -check-prefix=GCN %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+enable-flat-scratch -verify-machineinstrs -run-pass=prologepilog -o - %s | FileCheck -check-prefix=GCN %s
---
name: partial_spill_v128_1_of_4
diff --git a/llvm/test/CodeGen/AMDGPU/stack-pointer-offset-relative-frameindex.ll b/llvm/test/CodeGen/AMDGPU/stack-pointer-offset-relative-frameindex.ll
index d3e2df6763d5..63af19beaa0d 100644
--- a/llvm/test/CodeGen/AMDGPU/stack-pointer-offset-relative-frameindex.ll
+++ b/llvm/test/CodeGen/AMDGPU/stack-pointer-offset-relative-frameindex.ll
@@ -1,6 +1,6 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -march=amdgcn -mcpu=gfx1010 -verify-machineinstrs | FileCheck -check-prefix=MUBUF %s
-; RUN: llc < %s -march=amdgcn -mcpu=gfx1010 -amdgpu-enable-flat-scratch -verify-machineinstrs | FileCheck -check-prefix=FLATSCR %s
+; RUN: llc < %s -march=amdgcn -mcpu=gfx1010 -mattr=+enable-flat-scratch -verify-machineinstrs | FileCheck -check-prefix=FLATSCR %s
; During instruction selection, we use an immediate constant zero for soffset in
; MUBUF stack accesses and let eliminateFrameIndex fix up this field to use
diff --git a/llvm/test/CodeGen/AMDGPU/store-hi16.ll b/llvm/test/CodeGen/AMDGPU/store-hi16.ll
index 98f9dad9e851..dd32021532f5 100644
--- a/llvm/test/CodeGen/AMDGPU/store-hi16.ll
+++ b/llvm/test/CodeGen/AMDGPU/store-hi16.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,GFX900,GFX9,GFX900-MUBUF %s
; RUN: llc -march=amdgcn -mcpu=gfx906 -amdgpu-sroa=0 -mattr=-promote-alloca,+sram-ecc -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,GFX906,GFX9,NO-D16-HI %s
; RUN: llc -march=amdgcn -mcpu=fiji -amdgpu-sroa=0 -mattr=-promote-alloca -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,GFX803,NO-D16-HI %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,GFX900,GFX9,GFX900-FLATSCR %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -amdgpu-sroa=0 -mattr=-promote-alloca -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -allow-deprecated-dag-overlap -check-prefixes=GCN,GFX900,GFX9,GFX900-FLATSCR %s
; GCN-LABEL: {{^}}store_global_hi_v2i16:
; GCN: s_waitcnt
diff --git a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
index 645eead8c297..bd4360cb2e1f 100644
--- a/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
+++ b/llvm/test/CodeGen/AMDGPU/unaligned-load-store.ll
@@ -1,7 +1,7 @@
; RUN: llc -march=amdgcn -verify-machineinstrs< %s | FileCheck -check-prefixes=SI,MUBUF,ALIGNED %s
; RUN: llc -march=amdgcn -mcpu=bonaire -mattr=+unaligned-access-mode -verify-machineinstrs< %s | FileCheck -check-prefixes=SI,MUBUF,UNALIGNED %s
; RUN: llc -march=amdgcn -mcpu=tonga -mattr=-flat-for-global -verify-machineinstrs< %s | FileCheck -check-prefixes=SI,MUBUF,ALIGNED %s
-; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -amdgpu-enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,FLATSCR,ALIGNED %s
+; RUN: llc -march=amdgcn -mcpu=gfx900 -mattr=-flat-for-global -mattr=+enable-flat-scratch -verify-machineinstrs < %s | FileCheck -check-prefixes=SI,FLATSCR,ALIGNED %s
; SI-LABEL: {{^}}local_unaligned_load_store_i16:
; SI: ds_read_u8
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
index 003d4c74c0fc..78caeeaa1f3d 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-attribute-missing.ll
@@ -31,5 +31,5 @@ define amdgpu_kernel void @kernel1() #1 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
index e9179b0213b9..afb0d07f0c25 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-multistep.ll
@@ -97,6 +97,6 @@ define amdgpu_kernel void @kernel2() #0 {
attributes #0 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
index 292022039e5d..2dc57b42cfba 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-nested-function-calls.ll
@@ -41,6 +41,6 @@ define amdgpu_kernel void @kernel3() #2 {
attributes #2 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
index cd888064cada..745928d78467 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-prevent-attribute-propagation.ll
@@ -41,6 +41,6 @@ define amdgpu_kernel void @kernel2() #2 {
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
index 3ac9f0675bfe..7a5f1eae341f 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-recursion-test.ll
@@ -101,7 +101,7 @@ define amdgpu_kernel void @kernel(i32 addrspace(1)* %m) #1 {
attributes #0 = { nounwind readnone }
attributes #1 = { "uniform-work-group-size"="true" }
;.
-; CHECK: attributes #[[ATTR0]] = { nounwind readnone "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
-; CHECK: attributes #[[ATTR1]] = { nounwind readnone "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
-; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR0]] = { nounwind readnone "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR1]] = { nounwind readnone "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
+; CHECK: attributes #[[ATTR2]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="true" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
index 1381f871369d..bdc1c28f1654 100644
--- a/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
+++ b/llvm/test/CodeGen/AMDGPU/uniform-work-group-test.ll
@@ -61,5 +61,5 @@ define amdgpu_kernel void @kernel3() #0 {
attributes #0 = { "uniform-work-group-size"="false" }
;.
-; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
+; CHECK: attributes #[[ATTR0]] = { "amdgpu-no-dispatch-id" "amdgpu-no-dispatch-ptr" "amdgpu-no-hostcall-ptr" "amdgpu-no-implicitarg-ptr" "amdgpu-no-queue-ptr" "amdgpu-no-workgroup-id-x" "amdgpu-no-workgroup-id-y" "amdgpu-no-workgroup-id-z" "amdgpu-no-workitem-id-x" "amdgpu-no-workitem-id-y" "amdgpu-no-workitem-id-z" "uniform-work-group-size"="false" }
;.
diff --git a/llvm/test/CodeGen/AMDGPU/vector-spill-restore-to-other-vector-type.mir b/llvm/test/CodeGen/AMDGPU/vector-spill-restore-to-other-vector-type.mir
index 474e755e2933..db54cf60fba4 100644
--- a/llvm/test/CodeGen/AMDGPU/vector-spill-restore-to-other-vector-type.mir
+++ b/llvm/test/CodeGen/AMDGPU/vector-spill-restore-to-other-vector-type.mir
@@ -1,4 +1,4 @@
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -amdgpu-enable-flat-scratch -verify-machineinstrs -run-pass=prologepilog -o - %s | FileCheck -check-prefix=GCN %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx90a -mattr=+enable-flat-scratch -verify-machineinstrs -run-pass=prologepilog -o - %s | FileCheck -check-prefix=GCN %s
# A spilled register can be restored to its superclass during regalloc.
# As a result, we might see AGPR spills restored to VGPRs or the other way around.
diff --git a/llvm/test/CodeGen/AMDGPU/vgpr-spill-scc-clobber.mir b/llvm/test/CodeGen/AMDGPU/vgpr-spill-scc-clobber.mir
index 57f33e93f1db..c50cd752df98 100644
--- a/llvm/test/CodeGen/AMDGPU/vgpr-spill-scc-clobber.mir
+++ b/llvm/test/CodeGen/AMDGPU/vgpr-spill-scc-clobber.mir
@@ -1,7 +1,7 @@
# NOTE: Assertions have been autogenerated by utils/update_mir_test_checks.py
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=MUBUF %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -amdgpu-enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX9-FLATSCR %s
-# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -verify-machineinstrs -run-pass=prologepilog -amdgpu-enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX10-FLATSCR %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx900 -verify-machineinstrs -run-pass=prologepilog -mattr=+enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX9-FLATSCR %s
+# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=gfx1030 -verify-machineinstrs -run-pass=prologepilog -mattr=+enable-flat-scratch %s -o - | FileCheck -check-prefix=GFX10-FLATSCR %s
# RUN: llc -mtriple=amdgcn-amd-amdhsa -mcpu=fiji -amdgpu-spill-sgpr-to-vgpr=0 -verify-machineinstrs -run-pass=prologepilog %s -o - | FileCheck -check-prefix=VMEM-GFX8 %s
diff --git a/llvm/test/CodeGen/M68k/Arith/bitwise.ll b/llvm/test/CodeGen/M68k/Arith/bitwise.ll
index d5fe191ab174..fa8ea1370f86 100644
--- a/llvm/test/CodeGen/M68k/Arith/bitwise.ll
+++ b/llvm/test/CodeGen/M68k/Arith/bitwise.ll
@@ -230,3 +230,123 @@ define i32 @eoril(i32 %a) nounwind {
%1 = xor i32 %a, 305419896
ret i32 %1
}
+
+define i64 @lshr64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: lshr64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: suba.l #12, %sp
+; CHECK-NEXT: movem.l %d2-%d4, (0,%sp) ; 16-byte Folded Spill
+; CHECK-NEXT: move.l (28,%sp), %d3
+; CHECK-NEXT: move.l (16,%sp), %d2
+; CHECK-NEXT: move.l %d3, %d1
+; CHECK-NEXT: add.l #-32, %d1
+; CHECK-NEXT: bmi .LBB18_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: move.l #0, %d0
+; CHECK-NEXT: bra .LBB18_3
+; CHECK-NEXT: .LBB18_1:
+; CHECK-NEXT: move.l %d2, %d0
+; CHECK-NEXT: lsr.l %d3, %d0
+; CHECK-NEXT: .LBB18_3:
+; CHECK-NEXT: move.l %d3, %d4
+; CHECK-NEXT: add.l #-32, %d4
+; CHECK-NEXT: bmi .LBB18_4
+; CHECK-NEXT: ; %bb.5:
+; CHECK-NEXT: lsr.l %d1, %d2
+; CHECK-NEXT: move.l %d2, %d1
+; CHECK-NEXT: bra .LBB18_6
+; CHECK-NEXT: .LBB18_4:
+; CHECK-NEXT: move.l %d3, %d4
+; CHECK-NEXT: eori.l #31, %d4
+; CHECK-NEXT: lsl.l #1, %d2
+; CHECK-NEXT: move.l (20,%sp), %d1
+; CHECK-NEXT: lsl.l %d4, %d2
+; CHECK-NEXT: lsr.l %d3, %d1
+; CHECK-NEXT: or.l %d2, %d1
+; CHECK-NEXT: .LBB18_6:
+; CHECK-NEXT: movem.l (0,%sp), %d2-%d4 ; 16-byte Folded Reload
+; CHECK-NEXT: adda.l #12, %sp
+; CHECK-NEXT: rts
+ %1 = lshr i64 %a, %b
+ ret i64 %1
+}
+
+define i64 @ashr64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: ashr64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: suba.l #8, %sp
+; CHECK-NEXT: movem.l %d2-%d3, (0,%sp) ; 12-byte Folded Spill
+; CHECK-NEXT: move.l (24,%sp), %d2
+; CHECK-NEXT: move.l (12,%sp), %d0
+; CHECK-NEXT: move.l %d2, %d3
+; CHECK-NEXT: add.l #-32, %d3
+; CHECK-NEXT: move.l %d2, %d1
+; CHECK-NEXT: add.l #-32, %d1
+; CHECK-NEXT: bmi .LBB19_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: move.l %d0, %d1
+; CHECK-NEXT: asr.l %d3, %d1
+; CHECK-NEXT: bra .LBB19_3
+; CHECK-NEXT: .LBB19_1:
+; CHECK-NEXT: move.l %d2, %d1
+; CHECK-NEXT: eori.l #31, %d1
+; CHECK-NEXT: move.l %d0, %d3
+; CHECK-NEXT: lsl.l #1, %d3
+; CHECK-NEXT: lsl.l %d1, %d3
+; CHECK-NEXT: move.l (16,%sp), %d1
+; CHECK-NEXT: lsr.l %d2, %d1
+; CHECK-NEXT: or.l %d3, %d1
+; CHECK-NEXT: .LBB19_3:
+; CHECK-NEXT: move.l %d2, %d3
+; CHECK-NEXT: add.l #-32, %d3
+; CHECK-NEXT: bmi .LBB19_5
+; CHECK-NEXT: ; %bb.4:
+; CHECK-NEXT: move.l #31, %d2
+; CHECK-NEXT: .LBB19_5:
+; CHECK-NEXT: asr.l %d2, %d0
+; CHECK-NEXT: movem.l (0,%sp), %d2-%d3 ; 12-byte Folded Reload
+; CHECK-NEXT: adda.l #8, %sp
+; CHECK-NEXT: rts
+ %1 = ashr i64 %a, %b
+ ret i64 %1
+}
+
+define i64 @shl64(i64 %a, i64 %b) nounwind {
+; CHECK-LABEL: shl64:
+; CHECK: ; %bb.0:
+; CHECK-NEXT: suba.l #12, %sp
+; CHECK-NEXT: movem.l %d2-%d4, (0,%sp) ; 16-byte Folded Spill
+; CHECK-NEXT: move.l (28,%sp), %d3
+; CHECK-NEXT: move.l (20,%sp), %d2
+; CHECK-NEXT: move.l %d3, %d0
+; CHECK-NEXT: add.l #-32, %d0
+; CHECK-NEXT: bmi .LBB20_1
+; CHECK-NEXT: ; %bb.2:
+; CHECK-NEXT: move.l #0, %d1
+; CHECK-NEXT: bra .LBB20_3
+; CHECK-NEXT: .LBB20_1:
+; CHECK-NEXT: move.l %d2, %d1
+; CHECK-NEXT: lsl.l %d3, %d1
+; CHECK-NEXT: .LBB20_3:
+; CHECK-NEXT: move.l %d3, %d4
+; CHECK-NEXT: add.l #-32, %d4
+; CHECK-NEXT: bmi .LBB20_4
+; CHECK-NEXT: ; %bb.5:
+; CHECK-NEXT: lsl.l %d0, %d2
+; CHECK-NEXT: move.l %d2, %d0
+; CHECK-NEXT: bra .LBB20_6
+; CHECK-NEXT: .LBB20_4:
+; CHECK-NEXT: move.l %d3, %d4
+; CHECK-NEXT: eori.l #31, %d4
+; CHECK-NEXT: lsr.l #1, %d2
+; CHECK-NEXT: move.l (16,%sp), %d0
+; CHECK-NEXT: lsr.l %d4, %d2
+; CHECK-NEXT: lsl.l %d3, %d0
+; CHECK-NEXT: or.l %d2, %d0
+; CHECK-NEXT: .LBB20_6:
+; CHECK-NEXT: movem.l (0,%sp), %d2-%d4 ; 16-byte Folded Reload
+; CHECK-NEXT: adda.l #12, %sp
+; CHECK-NEXT: rts
+ %1 = shl i64 %a, %b
+ ret i64 %1
+}
diff --git a/llvm/test/CodeGen/M68k/Control/cmp.ll b/llvm/test/CodeGen/M68k/Control/cmp.ll
index 46f981b4d147..2a02ce1792e5 100644
--- a/llvm/test/CodeGen/M68k/Control/cmp.ll
+++ b/llvm/test/CodeGen/M68k/Control/cmp.ll
@@ -297,7 +297,7 @@ define void @test20(i32 %bf.load, i8 %x1, i8* %b_addr) {
; CHECK-NEXT: move.l (16,%sp), %a0
; CHECK-NEXT: move.b (15,%sp), %d2
; CHECK-NEXT: and.l #255, %d2
-; CHECK-NEXT: add.l %d2, %d1
+; CHECK-NEXT: add.l %d1, %d2
; CHECK-NEXT: sne (%a0)
; CHECK-NEXT: cmpi.l #0, %d0
; CHECK-NEXT: lea (d,%pc), %a0
diff --git a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
index 89592c6ecbb2..d5de71d7e42d 100644
--- a/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
+++ b/llvm/test/CodeGen/RISCV/ctlz-cttz-ctpop.ll
@@ -167,10 +167,10 @@ define i16 @test_cttz_i16(i16 %a) nounwind {
define i32 @test_cttz_i32(i32 %a) nounwind {
; RV32I-LABEL: test_cttz_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB2_2
-; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: beqz a0, .LBB2_2
+; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi a1, a0, -1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: and a0, a0, a1
@@ -194,20 +194,21 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
; RV32I-NEXT: addi a1, a1, 257
; RV32I-NEXT: call __mulsi3@plt
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: ret
+; RV32I-NEXT: j .LBB2_3
; RV32I-NEXT: .LBB2_2:
; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: .LBB2_3: # %cond.end
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_cttz_i32:
; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: beqz a1, .LBB2_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: addiw a1, a0, -1
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: and a0, a0, a1
@@ -231,11 +232,12 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB2_3
; RV64I-NEXT: .LBB2_2:
; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: .LBB2_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: test_cttz_i32:
@@ -254,10 +256,10 @@ define i32 @test_cttz_i32(i32 %a) nounwind {
define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-LABEL: test_ctlz_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB3_2
-; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: beqz a0, .LBB3_2
+; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -289,20 +291,21 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: addi a1, a1, 257
; RV32I-NEXT: call __mulsi3@plt
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: ret
+; RV32I-NEXT: j .LBB3_3
; RV32I-NEXT: .LBB3_2:
; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: .LBB3_3: # %cond.end
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV64I-LABEL: test_ctlz_i32:
; RV64I: # %bb.0:
+; RV64I-NEXT: addi sp, sp, -16
+; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: sext.w a1, a0
; RV64I-NEXT: beqz a1, .LBB3_2
; RV64I-NEXT: # %bb.1: # %cond.false
-; RV64I-NEXT: addi sp, sp, -16
-; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -334,11 +337,12 @@ define i32 @test_ctlz_i32(i32 %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB3_3
; RV64I-NEXT: .LBB3_2:
; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: .LBB3_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: test_ctlz_i32:
@@ -429,10 +433,10 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
;
; RV64I-LABEL: test_cttz_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB4_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB4_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: and a0, a0, a1
@@ -456,11 +460,12 @@ define i64 @test_cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: ld a1, %lo(.LCPI4_3)(a1)
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB4_3
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 64
+; RV64I-NEXT: .LBB4_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV32ZBB-LABEL: test_cttz_i64:
diff --git a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
index 2467280a95f2..6b535a2bdc43 100644
--- a/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/double-br-fcmp.ll
@@ -10,24 +10,28 @@ declare void @exit(i32)
define void @br_fcmp_false(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_false:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: li a0, 1
; RV32IFD-NEXT: bnez a0, .LBB0_2
; RV32IFD-NEXT: # %bb.1: # %if.then
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB0_2: # %if.else
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_false:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: li a0, 1
; RV64IFD-NEXT: bnez a0, .LBB0_2
; RV64IFD-NEXT: # %bb.1: # %if.then
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB0_2: # %if.else
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp false double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -41,24 +45,28 @@ if.else:
define void @br_fcmp_oeq(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_oeq:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: feq.d a0, fa0, fa1
; RV32IFD-NEXT: bnez a0, .LBB1_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB1_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_oeq:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: feq.d a0, fa0, fa1
; RV64IFD-NEXT: bnez a0, .LBB1_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB1_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp oeq double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -75,24 +83,28 @@ if.then:
define void @br_fcmp_oeq_alt(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_oeq_alt:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: feq.d a0, fa0, fa1
; RV32IFD-NEXT: bnez a0, .LBB2_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB2_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_oeq_alt:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: feq.d a0, fa0, fa1
; RV64IFD-NEXT: bnez a0, .LBB2_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB2_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp oeq double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -106,24 +118,28 @@ if.else:
define void @br_fcmp_ogt(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ogt:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa1, fa0
; RV32IFD-NEXT: bnez a0, .LBB3_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB3_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ogt:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa1, fa0
; RV64IFD-NEXT: bnez a0, .LBB3_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB3_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ogt double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -137,24 +153,28 @@ if.then:
define void @br_fcmp_oge(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_oge:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fle.d a0, fa1, fa0
; RV32IFD-NEXT: bnez a0, .LBB4_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB4_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_oge:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fle.d a0, fa1, fa0
; RV64IFD-NEXT: bnez a0, .LBB4_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB4_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp oge double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -168,24 +188,28 @@ if.then:
define void @br_fcmp_olt(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_olt:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa0, fa1
; RV32IFD-NEXT: bnez a0, .LBB5_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB5_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_olt:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa0, fa1
; RV64IFD-NEXT: bnez a0, .LBB5_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB5_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp olt double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -199,24 +223,28 @@ if.then:
define void @br_fcmp_ole(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ole:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fle.d a0, fa0, fa1
; RV32IFD-NEXT: bnez a0, .LBB6_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB6_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ole:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fle.d a0, fa0, fa1
; RV64IFD-NEXT: bnez a0, .LBB6_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB6_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ole double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -230,28 +258,32 @@ if.then:
define void @br_fcmp_one(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_one:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa0, fa1
; RV32IFD-NEXT: flt.d a1, fa1, fa0
; RV32IFD-NEXT: or a0, a1, a0
; RV32IFD-NEXT: bnez a0, .LBB7_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB7_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_one:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa0, fa1
; RV64IFD-NEXT: flt.d a1, fa1, fa0
; RV64IFD-NEXT: or a0, a1, a0
; RV64IFD-NEXT: bnez a0, .LBB7_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB7_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp one double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -265,28 +297,32 @@ if.then:
define void @br_fcmp_ord(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ord:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: feq.d a0, fa1, fa1
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: bnez a0, .LBB8_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB8_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ord:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: feq.d a0, fa1, fa1
; RV64IFD-NEXT: feq.d a1, fa0, fa0
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: bnez a0, .LBB8_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB8_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ord double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -300,28 +336,32 @@ if.then:
define void @br_fcmp_ueq(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ueq:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa0, fa1
; RV32IFD-NEXT: flt.d a1, fa1, fa0
; RV32IFD-NEXT: or a0, a1, a0
; RV32IFD-NEXT: beqz a0, .LBB9_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB9_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ueq:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa0, fa1
; RV64IFD-NEXT: flt.d a1, fa1, fa0
; RV64IFD-NEXT: or a0, a1, a0
; RV64IFD-NEXT: beqz a0, .LBB9_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB9_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ueq double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -335,24 +375,28 @@ if.then:
define void @br_fcmp_ugt(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ugt:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fle.d a0, fa0, fa1
; RV32IFD-NEXT: beqz a0, .LBB10_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB10_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ugt:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fle.d a0, fa0, fa1
; RV64IFD-NEXT: beqz a0, .LBB10_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB10_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ugt double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -366,24 +410,28 @@ if.then:
define void @br_fcmp_uge(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_uge:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa0, fa1
; RV32IFD-NEXT: beqz a0, .LBB11_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB11_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_uge:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa0, fa1
; RV64IFD-NEXT: beqz a0, .LBB11_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB11_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp uge double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -397,24 +445,28 @@ if.then:
define void @br_fcmp_ult(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ult:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: fle.d a0, fa1, fa0
; RV32IFD-NEXT: beqz a0, .LBB12_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB12_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ult:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: fle.d a0, fa1, fa0
; RV64IFD-NEXT: beqz a0, .LBB12_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB12_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ult double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -428,24 +480,28 @@ if.then:
define void @br_fcmp_ule(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_ule:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: flt.d a0, fa1, fa0
; RV32IFD-NEXT: beqz a0, .LBB13_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB13_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_ule:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: flt.d a0, fa1, fa0
; RV64IFD-NEXT: beqz a0, .LBB13_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB13_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp ule double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -459,24 +515,28 @@ if.then:
define void @br_fcmp_une(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_une:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: feq.d a0, fa0, fa1
; RV32IFD-NEXT: beqz a0, .LBB14_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB14_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_une:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: feq.d a0, fa0, fa1
; RV64IFD-NEXT: beqz a0, .LBB14_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB14_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp une double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -491,28 +551,32 @@ define void @br_fcmp_uno(double %a, double %b) nounwind {
; TODO: sltiu+bne -> beq
; RV32IFD-LABEL: br_fcmp_uno:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: feq.d a0, fa1, fa1
; RV32IFD-NEXT: feq.d a1, fa0, fa0
; RV32IFD-NEXT: and a0, a1, a0
; RV32IFD-NEXT: beqz a0, .LBB15_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB15_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_uno:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: feq.d a0, fa1, fa1
; RV64IFD-NEXT: feq.d a1, fa0, fa0
; RV64IFD-NEXT: and a0, a1, a0
; RV64IFD-NEXT: beqz a0, .LBB15_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB15_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp uno double %a, %b
br i1 %1, label %if.then, label %if.else
@@ -526,24 +590,28 @@ if.then:
define void @br_fcmp_true(double %a, double %b) nounwind {
; RV32IFD-LABEL: br_fcmp_true:
; RV32IFD: # %bb.0:
+; RV32IFD-NEXT: addi sp, sp, -16
+; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: li a0, 1
; RV32IFD-NEXT: bnez a0, .LBB16_2
; RV32IFD-NEXT: # %bb.1: # %if.else
+; RV32IFD-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IFD-NEXT: addi sp, sp, 16
; RV32IFD-NEXT: ret
; RV32IFD-NEXT: .LBB16_2: # %if.then
-; RV32IFD-NEXT: addi sp, sp, -16
-; RV32IFD-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IFD-NEXT: call abort@plt
;
; RV64IFD-LABEL: br_fcmp_true:
; RV64IFD: # %bb.0:
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: li a0, 1
; RV64IFD-NEXT: bnez a0, .LBB16_2
; RV64IFD-NEXT: # %bb.1: # %if.else
+; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
; RV64IFD-NEXT: .LBB16_2: # %if.then
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: call abort@plt
%1 = fcmp true double %a, %b
br i1 %1, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
index 6c5bd89b6b0e..f612b533008e 100644
--- a/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
+++ b/llvm/test/CodeGen/RISCV/double-stack-spill-restore.ll
@@ -35,12 +35,12 @@ define double @func(double %d, i32 %n) nounwind {
;
; RV64IFD-LABEL: func:
; RV64IFD: # %bb.0: # %entry
+; RV64IFD-NEXT: addi sp, sp, -16
+; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: sext.w a2, a1
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: beqz a2, .LBB0_2
; RV64IFD-NEXT: # %bb.1: # %if.else
-; RV64IFD-NEXT: addi sp, sp, -16
-; RV64IFD-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IFD-NEXT: addiw a1, a1, -1
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: fsd ft0, 0(sp) # 8-byte Folded Spill
@@ -48,13 +48,11 @@ define double @func(double %d, i32 %n) nounwind {
; RV64IFD-NEXT: fmv.d.x ft0, a0
; RV64IFD-NEXT: fld ft1, 0(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: fadd.d ft0, ft0, ft1
+; RV64IFD-NEXT: .LBB0_2: # %return
; RV64IFD-NEXT: fmv.x.d a0, ft0
; RV64IFD-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64IFD-NEXT: addi sp, sp, 16
; RV64IFD-NEXT: ret
-; RV64IFD-NEXT: .LBB0_2: # %return
-; RV64IFD-NEXT: fmv.x.d a0, ft0
-; RV64IFD-NEXT: ret
entry:
%cmp = icmp eq i32 %n, 0
br i1 %cmp, label %return, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
index cac2df760cfb..05c9ce89c5a8 100644
--- a/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/float-br-fcmp.ll
@@ -11,24 +11,28 @@ declare float @dummy(float)
define void @br_fcmp_false(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_false:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: li a0, 1
; RV32IF-NEXT: bnez a0, .LBB0_2
; RV32IF-NEXT: # %bb.1: # %if.then
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB0_2: # %if.else
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_false:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: li a0, 1
; RV64IF-NEXT: bnez a0, .LBB0_2
; RV64IF-NEXT: # %bb.1: # %if.then
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB0_2: # %if.else
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp false float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -42,24 +46,28 @@ if.else:
define void @br_fcmp_oeq(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_oeq:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: feq.s a0, fa0, fa1
; RV32IF-NEXT: bnez a0, .LBB1_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB1_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_oeq:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: feq.s a0, fa0, fa1
; RV64IF-NEXT: bnez a0, .LBB1_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB1_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -76,24 +84,28 @@ if.then:
define void @br_fcmp_oeq_alt(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_oeq_alt:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: feq.s a0, fa0, fa1
; RV32IF-NEXT: bnez a0, .LBB2_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB2_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_oeq_alt:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: feq.s a0, fa0, fa1
; RV64IF-NEXT: bnez a0, .LBB2_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB2_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp oeq float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -107,24 +119,28 @@ if.else:
define void @br_fcmp_ogt(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ogt:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa1, fa0
; RV32IF-NEXT: bnez a0, .LBB3_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB3_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ogt:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa1, fa0
; RV64IF-NEXT: bnez a0, .LBB3_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB3_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ogt float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -138,24 +154,28 @@ if.then:
define void @br_fcmp_oge(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_oge:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fle.s a0, fa1, fa0
; RV32IF-NEXT: bnez a0, .LBB4_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB4_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_oge:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fle.s a0, fa1, fa0
; RV64IF-NEXT: bnez a0, .LBB4_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB4_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp oge float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -169,24 +189,28 @@ if.then:
define void @br_fcmp_olt(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_olt:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa0, fa1
; RV32IF-NEXT: bnez a0, .LBB5_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB5_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_olt:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa0, fa1
; RV64IF-NEXT: bnez a0, .LBB5_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB5_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp olt float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -200,24 +224,28 @@ if.then:
define void @br_fcmp_ole(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ole:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fle.s a0, fa0, fa1
; RV32IF-NEXT: bnez a0, .LBB6_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB6_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ole:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fle.s a0, fa0, fa1
; RV64IF-NEXT: bnez a0, .LBB6_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB6_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ole float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -231,28 +259,32 @@ if.then:
define void @br_fcmp_one(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_one:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa0, fa1
; RV32IF-NEXT: flt.s a1, fa1, fa0
; RV32IF-NEXT: or a0, a1, a0
; RV32IF-NEXT: bnez a0, .LBB7_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB7_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_one:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa0, fa1
; RV64IF-NEXT: flt.s a1, fa1, fa0
; RV64IF-NEXT: or a0, a1, a0
; RV64IF-NEXT: bnez a0, .LBB7_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB7_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp one float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -266,28 +298,32 @@ if.then:
define void @br_fcmp_ord(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ord:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: feq.s a0, fa1, fa1
; RV32IF-NEXT: feq.s a1, fa0, fa0
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: bnez a0, .LBB8_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB8_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ord:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: feq.s a0, fa1, fa1
; RV64IF-NEXT: feq.s a1, fa0, fa0
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: bnez a0, .LBB8_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB8_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ord float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -301,28 +337,32 @@ if.then:
define void @br_fcmp_ueq(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ueq:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa0, fa1
; RV32IF-NEXT: flt.s a1, fa1, fa0
; RV32IF-NEXT: or a0, a1, a0
; RV32IF-NEXT: beqz a0, .LBB9_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB9_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ueq:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa0, fa1
; RV64IF-NEXT: flt.s a1, fa1, fa0
; RV64IF-NEXT: or a0, a1, a0
; RV64IF-NEXT: beqz a0, .LBB9_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB9_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ueq float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -336,24 +376,28 @@ if.then:
define void @br_fcmp_ugt(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ugt:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fle.s a0, fa0, fa1
; RV32IF-NEXT: beqz a0, .LBB10_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB10_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ugt:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fle.s a0, fa0, fa1
; RV64IF-NEXT: beqz a0, .LBB10_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB10_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ugt float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -367,24 +411,28 @@ if.then:
define void @br_fcmp_uge(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_uge:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa0, fa1
; RV32IF-NEXT: beqz a0, .LBB11_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB11_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_uge:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa0, fa1
; RV64IF-NEXT: beqz a0, .LBB11_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB11_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp uge float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -398,24 +446,28 @@ if.then:
define void @br_fcmp_ult(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ult:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: fle.s a0, fa1, fa0
; RV32IF-NEXT: beqz a0, .LBB12_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB12_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ult:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: fle.s a0, fa1, fa0
; RV64IF-NEXT: beqz a0, .LBB12_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB12_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ult float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -429,24 +481,28 @@ if.then:
define void @br_fcmp_ule(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_ule:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: flt.s a0, fa1, fa0
; RV32IF-NEXT: beqz a0, .LBB13_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB13_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_ule:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: flt.s a0, fa1, fa0
; RV64IF-NEXT: beqz a0, .LBB13_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB13_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp ule float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -460,24 +516,28 @@ if.then:
define void @br_fcmp_une(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_une:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: feq.s a0, fa0, fa1
; RV32IF-NEXT: beqz a0, .LBB14_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB14_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_une:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: feq.s a0, fa0, fa1
; RV64IF-NEXT: beqz a0, .LBB14_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB14_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp une float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -492,28 +552,32 @@ define void @br_fcmp_uno(float %a, float %b) nounwind {
; TODO: sltiu+bne -> beq
; RV32IF-LABEL: br_fcmp_uno:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: feq.s a0, fa1, fa1
; RV32IF-NEXT: feq.s a1, fa0, fa0
; RV32IF-NEXT: and a0, a1, a0
; RV32IF-NEXT: beqz a0, .LBB15_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB15_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_uno:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: feq.s a0, fa1, fa1
; RV64IF-NEXT: feq.s a1, fa0, fa0
; RV64IF-NEXT: and a0, a1, a0
; RV64IF-NEXT: beqz a0, .LBB15_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB15_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp uno float %a, %b
br i1 %1, label %if.then, label %if.else
@@ -527,24 +591,28 @@ if.then:
define void @br_fcmp_true(float %a, float %b) nounwind {
; RV32IF-LABEL: br_fcmp_true:
; RV32IF: # %bb.0:
+; RV32IF-NEXT: addi sp, sp, -16
+; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: li a0, 1
; RV32IF-NEXT: bnez a0, .LBB16_2
; RV32IF-NEXT: # %bb.1: # %if.else
+; RV32IF-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IF-NEXT: addi sp, sp, 16
; RV32IF-NEXT: ret
; RV32IF-NEXT: .LBB16_2: # %if.then
-; RV32IF-NEXT: addi sp, sp, -16
-; RV32IF-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IF-NEXT: call abort@plt
;
; RV64IF-LABEL: br_fcmp_true:
; RV64IF: # %bb.0:
+; RV64IF-NEXT: addi sp, sp, -16
+; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: li a0, 1
; RV64IF-NEXT: bnez a0, .LBB16_2
; RV64IF-NEXT: # %bb.1: # %if.else
+; RV64IF-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IF-NEXT: addi sp, sp, 16
; RV64IF-NEXT: ret
; RV64IF-NEXT: .LBB16_2: # %if.then
-; RV64IF-NEXT: addi sp, sp, -16
-; RV64IF-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IF-NEXT: call abort@plt
%1 = fcmp true float %a, %b
br i1 %1, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/frame-info.ll b/llvm/test/CodeGen/RISCV/frame-info.ll
index 0a28df1cf5fb..c872fb3774b5 100644
--- a/llvm/test/CodeGen/RISCV/frame-info.ll
+++ b/llvm/test/CodeGen/RISCV/frame-info.ll
@@ -146,15 +146,17 @@ entry:
define void @branch_and_tail_call(i1 %a) {
; RV32-LABEL: branch_and_tail_call:
; RV32: # %bb.0:
+; RV32-NEXT: addi sp, sp, -16
+; RV32-NEXT: .cfi_def_cfa_offset 16
+; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: andi a0, a0, 1
; RV32-NEXT: beqz a0, .LBB2_2
; RV32-NEXT: # %bb.1: # %blue_pill
+; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-NEXT: addi sp, sp, 16
; RV32-NEXT: tail callee1@plt
; RV32-NEXT: .LBB2_2: # %red_pill
-; RV32-NEXT: addi sp, sp, -16
-; RV32-NEXT: .cfi_def_cfa_offset 16
-; RV32-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
-; RV32-NEXT: .cfi_offset ra, -4
; RV32-NEXT: call callee2@plt
; RV32-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-NEXT: addi sp, sp, 16
@@ -162,15 +164,17 @@ define void @branch_and_tail_call(i1 %a) {
;
; RV64-LABEL: branch_and_tail_call:
; RV64: # %bb.0:
+; RV64-NEXT: addi sp, sp, -16
+; RV64-NEXT: .cfi_def_cfa_offset 16
+; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: andi a0, a0, 1
; RV64-NEXT: beqz a0, .LBB2_2
; RV64-NEXT: # %bb.1: # %blue_pill
+; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-NEXT: addi sp, sp, 16
; RV64-NEXT: tail callee1@plt
; RV64-NEXT: .LBB2_2: # %red_pill
-; RV64-NEXT: addi sp, sp, -16
-; RV64-NEXT: .cfi_def_cfa_offset 16
-; RV64-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
-; RV64-NEXT: .cfi_offset ra, -8
; RV64-NEXT: call callee2@plt
; RV64-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-NEXT: addi sp, sp, 16
@@ -178,11 +182,6 @@ define void @branch_and_tail_call(i1 %a) {
;
; RV32-WITHFP-LABEL: branch_and_tail_call:
; RV32-WITHFP: # %bb.0:
-; RV32-WITHFP-NEXT: andi a0, a0, 1
-; RV32-WITHFP-NEXT: beqz a0, .LBB2_2
-; RV32-WITHFP-NEXT: # %bb.1: # %blue_pill
-; RV32-WITHFP-NEXT: tail callee1@plt
-; RV32-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV32-WITHFP-NEXT: addi sp, sp, -16
; RV32-WITHFP-NEXT: .cfi_def_cfa_offset 16
; RV32-WITHFP-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
@@ -191,6 +190,14 @@ define void @branch_and_tail_call(i1 %a) {
; RV32-WITHFP-NEXT: .cfi_offset s0, -8
; RV32-WITHFP-NEXT: addi s0, sp, 16
; RV32-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV32-WITHFP-NEXT: andi a0, a0, 1
+; RV32-WITHFP-NEXT: beqz a0, .LBB2_2
+; RV32-WITHFP-NEXT: # %bb.1: # %blue_pill
+; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
+; RV32-WITHFP-NEXT: addi sp, sp, 16
+; RV32-WITHFP-NEXT: tail callee1@plt
+; RV32-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV32-WITHFP-NEXT: call callee2@plt
; RV32-WITHFP-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32-WITHFP-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
@@ -199,11 +206,6 @@ define void @branch_and_tail_call(i1 %a) {
;
; RV64-WITHFP-LABEL: branch_and_tail_call:
; RV64-WITHFP: # %bb.0:
-; RV64-WITHFP-NEXT: andi a0, a0, 1
-; RV64-WITHFP-NEXT: beqz a0, .LBB2_2
-; RV64-WITHFP-NEXT: # %bb.1: # %blue_pill
-; RV64-WITHFP-NEXT: tail callee1@plt
-; RV64-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV64-WITHFP-NEXT: addi sp, sp, -16
; RV64-WITHFP-NEXT: .cfi_def_cfa_offset 16
; RV64-WITHFP-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
@@ -212,6 +214,14 @@ define void @branch_and_tail_call(i1 %a) {
; RV64-WITHFP-NEXT: .cfi_offset s0, -16
; RV64-WITHFP-NEXT: addi s0, sp, 16
; RV64-WITHFP-NEXT: .cfi_def_cfa s0, 0
+; RV64-WITHFP-NEXT: andi a0, a0, 1
+; RV64-WITHFP-NEXT: beqz a0, .LBB2_2
+; RV64-WITHFP-NEXT: # %bb.1: # %blue_pill
+; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
+; RV64-WITHFP-NEXT: addi sp, sp, 16
+; RV64-WITHFP-NEXT: tail callee1@plt
+; RV64-WITHFP-NEXT: .LBB2_2: # %red_pill
; RV64-WITHFP-NEXT: call callee2@plt
; RV64-WITHFP-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64-WITHFP-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
index ccff49cd98cf..5f9108f46d30 100644
--- a/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
+++ b/llvm/test/CodeGen/RISCV/half-br-fcmp.ll
@@ -11,24 +11,28 @@ declare half @dummy(half)
define void @br_fcmp_false(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_false:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: li a0, 1
; RV32IZFH-NEXT: bnez a0, .LBB0_2
; RV32IZFH-NEXT: # %bb.1: # %if.then
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB0_2: # %if.else
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_false:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: li a0, 1
; RV64IZFH-NEXT: bnez a0, .LBB0_2
; RV64IZFH-NEXT: # %bb.1: # %if.then
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB0_2: # %if.else
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp false half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -42,24 +46,28 @@ if.else:
define void @br_fcmp_oeq(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_oeq:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: feq.h a0, fa0, fa1
; RV32IZFH-NEXT: bnez a0, .LBB1_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB1_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_oeq:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: feq.h a0, fa0, fa1
; RV64IZFH-NEXT: bnez a0, .LBB1_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB1_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp oeq half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -76,24 +84,28 @@ if.then:
define void @br_fcmp_oeq_alt(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_oeq_alt:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: feq.h a0, fa0, fa1
; RV32IZFH-NEXT: bnez a0, .LBB2_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB2_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_oeq_alt:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: feq.h a0, fa0, fa1
; RV64IZFH-NEXT: bnez a0, .LBB2_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB2_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp oeq half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -107,24 +119,28 @@ if.else:
define void @br_fcmp_ogt(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ogt:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa1, fa0
; RV32IZFH-NEXT: bnez a0, .LBB3_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB3_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ogt:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa1, fa0
; RV64IZFH-NEXT: bnez a0, .LBB3_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB3_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ogt half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -138,24 +154,28 @@ if.then:
define void @br_fcmp_oge(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_oge:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fle.h a0, fa1, fa0
; RV32IZFH-NEXT: bnez a0, .LBB4_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB4_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_oge:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fle.h a0, fa1, fa0
; RV64IZFH-NEXT: bnez a0, .LBB4_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB4_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp oge half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -169,24 +189,28 @@ if.then:
define void @br_fcmp_olt(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_olt:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa0, fa1
; RV32IZFH-NEXT: bnez a0, .LBB5_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB5_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_olt:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa0, fa1
; RV64IZFH-NEXT: bnez a0, .LBB5_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB5_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp olt half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -200,24 +224,28 @@ if.then:
define void @br_fcmp_ole(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ole:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fle.h a0, fa0, fa1
; RV32IZFH-NEXT: bnez a0, .LBB6_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB6_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ole:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fle.h a0, fa0, fa1
; RV64IZFH-NEXT: bnez a0, .LBB6_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB6_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ole half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -231,28 +259,32 @@ if.then:
define void @br_fcmp_one(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_one:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa0, fa1
; RV32IZFH-NEXT: flt.h a1, fa1, fa0
; RV32IZFH-NEXT: or a0, a1, a0
; RV32IZFH-NEXT: bnez a0, .LBB7_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB7_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_one:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa0, fa1
; RV64IZFH-NEXT: flt.h a1, fa1, fa0
; RV64IZFH-NEXT: or a0, a1, a0
; RV64IZFH-NEXT: bnez a0, .LBB7_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB7_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp one half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -266,28 +298,32 @@ if.then:
define void @br_fcmp_ord(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ord:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
; RV32IZFH-NEXT: bnez a0, .LBB8_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB8_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ord:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
; RV64IZFH-NEXT: bnez a0, .LBB8_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB8_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ord half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -301,28 +337,32 @@ if.then:
define void @br_fcmp_ueq(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ueq:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa0, fa1
; RV32IZFH-NEXT: flt.h a1, fa1, fa0
; RV32IZFH-NEXT: or a0, a1, a0
; RV32IZFH-NEXT: beqz a0, .LBB9_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB9_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ueq:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa0, fa1
; RV64IZFH-NEXT: flt.h a1, fa1, fa0
; RV64IZFH-NEXT: or a0, a1, a0
; RV64IZFH-NEXT: beqz a0, .LBB9_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB9_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ueq half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -336,24 +376,28 @@ if.then:
define void @br_fcmp_ugt(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ugt:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fle.h a0, fa0, fa1
; RV32IZFH-NEXT: beqz a0, .LBB10_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB10_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ugt:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fle.h a0, fa0, fa1
; RV64IZFH-NEXT: beqz a0, .LBB10_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB10_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ugt half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -367,24 +411,28 @@ if.then:
define void @br_fcmp_uge(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_uge:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa0, fa1
; RV32IZFH-NEXT: beqz a0, .LBB11_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB11_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_uge:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa0, fa1
; RV64IZFH-NEXT: beqz a0, .LBB11_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB11_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp uge half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -398,24 +446,28 @@ if.then:
define void @br_fcmp_ult(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ult:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: fle.h a0, fa1, fa0
; RV32IZFH-NEXT: beqz a0, .LBB12_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB12_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ult:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: fle.h a0, fa1, fa0
; RV64IZFH-NEXT: beqz a0, .LBB12_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB12_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ult half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -429,24 +481,28 @@ if.then:
define void @br_fcmp_ule(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_ule:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: flt.h a0, fa1, fa0
; RV32IZFH-NEXT: beqz a0, .LBB13_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB13_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_ule:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: flt.h a0, fa1, fa0
; RV64IZFH-NEXT: beqz a0, .LBB13_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB13_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp ule half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -460,24 +516,28 @@ if.then:
define void @br_fcmp_une(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_une:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: feq.h a0, fa0, fa1
; RV32IZFH-NEXT: beqz a0, .LBB14_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB14_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_une:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: feq.h a0, fa0, fa1
; RV64IZFH-NEXT: beqz a0, .LBB14_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB14_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp une half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -492,28 +552,32 @@ define void @br_fcmp_uno(half %a, half %b) nounwind {
; TODO: sltiu+bne -> beq
; RV32IZFH-LABEL: br_fcmp_uno:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: feq.h a0, fa1, fa1
; RV32IZFH-NEXT: feq.h a1, fa0, fa0
; RV32IZFH-NEXT: and a0, a1, a0
; RV32IZFH-NEXT: beqz a0, .LBB15_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB15_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_uno:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: feq.h a0, fa1, fa1
; RV64IZFH-NEXT: feq.h a1, fa0, fa0
; RV64IZFH-NEXT: and a0, a1, a0
; RV64IZFH-NEXT: beqz a0, .LBB15_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB15_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp uno half %a, %b
br i1 %1, label %if.then, label %if.else
@@ -527,24 +591,28 @@ if.then:
define void @br_fcmp_true(half %a, half %b) nounwind {
; RV32IZFH-LABEL: br_fcmp_true:
; RV32IZFH: # %bb.0:
+; RV32IZFH-NEXT: addi sp, sp, -16
+; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: li a0, 1
; RV32IZFH-NEXT: bnez a0, .LBB16_2
; RV32IZFH-NEXT: # %bb.1: # %if.else
+; RV32IZFH-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32IZFH-NEXT: addi sp, sp, 16
; RV32IZFH-NEXT: ret
; RV32IZFH-NEXT: .LBB16_2: # %if.then
-; RV32IZFH-NEXT: addi sp, sp, -16
-; RV32IZFH-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32IZFH-NEXT: call abort@plt
;
; RV64IZFH-LABEL: br_fcmp_true:
; RV64IZFH: # %bb.0:
+; RV64IZFH-NEXT: addi sp, sp, -16
+; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: li a0, 1
; RV64IZFH-NEXT: bnez a0, .LBB16_2
; RV64IZFH-NEXT: # %bb.1: # %if.else
+; RV64IZFH-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64IZFH-NEXT: addi sp, sp, 16
; RV64IZFH-NEXT: ret
; RV64IZFH-NEXT: .LBB16_2: # %if.then
-; RV64IZFH-NEXT: addi sp, sp, -16
-; RV64IZFH-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64IZFH-NEXT: call abort@plt
%1 = fcmp true half %a, %b
br i1 %1, label %if.then, label %if.else
diff --git a/llvm/test/CodeGen/RISCV/rv32zbb.ll b/llvm/test/CodeGen/RISCV/rv32zbb.ll
index d5d6b3ab0b89..64c9e35146f6 100644
--- a/llvm/test/CodeGen/RISCV/rv32zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv32zbb.ll
@@ -9,10 +9,10 @@ declare i32 @llvm.ctlz.i32(i32, i1)
define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-LABEL: ctlz_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB0_2
-; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: beqz a0, .LBB0_2
+; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: srli a1, a0, 1
; RV32I-NEXT: or a0, a0, a1
; RV32I-NEXT: srli a1, a0, 2
@@ -44,11 +44,12 @@ define i32 @ctlz_i32(i32 %a) nounwind {
; RV32I-NEXT: addi a1, a1, 257
; RV32I-NEXT: call __mulsi3@plt
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: ret
+; RV32I-NEXT: j .LBB0_3
; RV32I-NEXT: .LBB0_2:
; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: .LBB0_3: # %cond.end
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: ctlz_i32:
@@ -171,10 +172,10 @@ declare i32 @llvm.cttz.i32(i32, i1)
define i32 @cttz_i32(i32 %a) nounwind {
; RV32I-LABEL: cttz_i32:
; RV32I: # %bb.0:
-; RV32I-NEXT: beqz a0, .LBB2_2
-; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi sp, sp, -16
; RV32I-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
+; RV32I-NEXT: beqz a0, .LBB2_2
+; RV32I-NEXT: # %bb.1: # %cond.false
; RV32I-NEXT: addi a1, a0, -1
; RV32I-NEXT: not a0, a0
; RV32I-NEXT: and a0, a0, a1
@@ -198,11 +199,12 @@ define i32 @cttz_i32(i32 %a) nounwind {
; RV32I-NEXT: addi a1, a1, 257
; RV32I-NEXT: call __mulsi3@plt
; RV32I-NEXT: srli a0, a0, 24
-; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
-; RV32I-NEXT: addi sp, sp, 16
-; RV32I-NEXT: ret
+; RV32I-NEXT: j .LBB2_3
; RV32I-NEXT: .LBB2_2:
; RV32I-NEXT: li a0, 32
+; RV32I-NEXT: .LBB2_3: # %cond.end
+; RV32I-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-NEXT: addi sp, sp, 16
; RV32I-NEXT: ret
;
; RV32ZBB-LABEL: cttz_i32:
diff --git a/llvm/test/CodeGen/RISCV/rv64zbb.ll b/llvm/test/CodeGen/RISCV/rv64zbb.ll
index 27d2a232c5c9..a5e3061f5095 100644
--- a/llvm/test/CodeGen/RISCV/rv64zbb.ll
+++ b/llvm/test/CodeGen/RISCV/rv64zbb.ll
@@ -9,10 +9,10 @@ declare i32 @llvm.ctlz.i32(i32, i1)
define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-LABEL: ctlz_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB0_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB0_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -44,11 +44,12 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB0_3
; RV64I-NEXT: .LBB0_2:
; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: .LBB0_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctlz_i32:
@@ -62,10 +63,10 @@ define signext i32 @ctlz_i32(i32 signext %a) nounwind {
define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-LABEL: log2_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB1_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB1_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srliw a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srliw a1, a0, 2
@@ -97,14 +98,14 @@ define signext i32 @log2_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: j .LBB1_3
; RV64I-NEXT: .LBB1_2:
; RV64I-NEXT: li a0, 32
; RV64I-NEXT: .LBB1_3: # %cond.end
; RV64I-NEXT: li a1, 31
; RV64I-NEXT: sub a0, a1, a0
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: log2_i32:
@@ -249,13 +250,13 @@ define signext i32 @findLastSet_i32(i32 signext %a) nounwind {
define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-LABEL: ctlz_lshr_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: srliw a0, a0, 1
-; RV64I-NEXT: beqz a0, .LBB4_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: .cfi_def_cfa_offset 16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-NEXT: .cfi_offset ra, -8
+; RV64I-NEXT: srliw a0, a0, 1
+; RV64I-NEXT: beqz a0, .LBB4_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -288,11 +289,12 @@ define i32 @ctlz_lshr_i32(i32 signext %a) {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB4_3
; RV64I-NEXT: .LBB4_2:
; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: .LBB4_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctlz_lshr_i32:
@@ -310,10 +312,10 @@ declare i64 @llvm.ctlz.i64(i64, i1)
define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-LABEL: ctlz_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB5_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB5_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: srli a1, a0, 1
; RV64I-NEXT: or a0, a0, a1
; RV64I-NEXT: srli a1, a0, 2
@@ -347,11 +349,12 @@ define i64 @ctlz_i64(i64 %a) nounwind {
; RV64I-NEXT: ld a1, %lo(.LCPI5_3)(a1)
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB5_3
; RV64I-NEXT: .LBB5_2:
; RV64I-NEXT: li a0, 64
+; RV64I-NEXT: .LBB5_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: ctlz_i64:
@@ -367,10 +370,10 @@ declare i32 @llvm.cttz.i32(i32, i1)
define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-LABEL: cttz_i32:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB6_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB6_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addiw a1, a0, -1
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: and a0, a0, a1
@@ -394,11 +397,12 @@ define signext i32 @cttz_i32(i32 signext %a) nounwind {
; RV64I-NEXT: addiw a1, a1, 257
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srliw a0, a0, 24
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB6_3
; RV64I-NEXT: .LBB6_2:
; RV64I-NEXT: li a0, 32
+; RV64I-NEXT: .LBB6_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: cttz_i32:
@@ -567,10 +571,10 @@ declare i64 @llvm.cttz.i64(i64, i1)
define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-LABEL: cttz_i64:
; RV64I: # %bb.0:
-; RV64I-NEXT: beqz a0, .LBB10_2
-; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi sp, sp, -16
; RV64I-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
+; RV64I-NEXT: beqz a0, .LBB10_2
+; RV64I-NEXT: # %bb.1: # %cond.false
; RV64I-NEXT: addi a1, a0, -1
; RV64I-NEXT: not a0, a0
; RV64I-NEXT: and a0, a0, a1
@@ -594,11 +598,12 @@ define i64 @cttz_i64(i64 %a) nounwind {
; RV64I-NEXT: ld a1, %lo(.LCPI10_3)(a1)
; RV64I-NEXT: call __muldi3@plt
; RV64I-NEXT: srli a0, a0, 56
-; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
-; RV64I-NEXT: addi sp, sp, 16
-; RV64I-NEXT: ret
+; RV64I-NEXT: j .LBB10_3
; RV64I-NEXT: .LBB10_2:
; RV64I-NEXT: li a0, 64
+; RV64I-NEXT: .LBB10_3: # %cond.end
+; RV64I-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-NEXT: addi sp, sp, 16
; RV64I-NEXT: ret
;
; RV64ZBB-LABEL: cttz_i64:
diff --git a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
index b222360c4c4e..1cb41692a8cc 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
+++ b/llvm/test/CodeGen/RISCV/rvv/vsetvli-insert-crossbb.mir
@@ -99,6 +99,31 @@
ret void
}
+ define void @redusum_loop(i32* nocapture noundef readonly %a, i32 noundef signext %n, i32* nocapture noundef writeonly %res) #0 {
+ entry:
+ br label %vector.body
+
+ vector.body: ; preds = %vector.body, %entry
+ %lsr.iv1 = phi i32* [ %scevgep, %vector.body ], [ %a, %entry ]
+ %lsr.iv = phi i64 [ %lsr.iv.next, %vector.body ], [ 2048, %entry ]
+ %vec.phi = phi <4 x i32> [ zeroinitializer, %entry ], [ %0, %vector.body ]
+ %lsr.iv12 = bitcast i32* %lsr.iv1 to <4 x i32>*
+ %wide.load = load <4 x i32>, <4 x i32>* %lsr.iv12, align 4
+ %0 = add <4 x i32> %wide.load, %vec.phi
+ %lsr.iv.next = add nsw i64 %lsr.iv, -4
+ %scevgep = getelementptr i32, i32* %lsr.iv1, i64 4
+ %1 = icmp eq i64 %lsr.iv.next, 0
+ br i1 %1, label %middle.block, label %vector.body
+
+ middle.block: ; preds = %vector.body
+ %2 = call i32 @llvm.vector.reduce.add.v4i32(<4 x i32> %0)
+ store i32 %2, i32* %res, align 4
+ ret void
+ }
+
+ ; Function Attrs: nofree nosync nounwind readnone willreturn
+ declare i32 @llvm.vector.reduce.add.v4i32(<4 x i32>)
+
; Function Attrs: nounwind readnone
declare <vscale x 1 x i64> @llvm.riscv.vadd.nxv1i64.nxv1i64.i64(<vscale x 1 x i64>, <vscale x 1 x i64>, i64) #1
@@ -567,8 +592,10 @@ body: |
; CHECK-NEXT: [[PseudoVADD_VX_M1_:%[0-9]+]]:vr = PseudoVADD_VX_M1 [[PseudoVID_V_M1_]], [[PHI]], -1, 6, implicit $vl, implicit $vtype
; CHECK-NEXT: [[MUL:%[0-9]+]]:gpr = MUL [[PHI]], [[SRLI]]
; CHECK-NEXT: [[ADD:%[0-9]+]]:gpr = ADD [[COPY]], [[MUL]]
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 87, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: PseudoVSE32_V_MF2 killed [[PseudoVADD_VX_M1_]], killed [[ADD]], -1, 5, implicit $vl, implicit $vtype
; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = ADDI [[PHI]], 1
+ ; CHECK-NEXT: dead $x0 = PseudoVSETVLIX0 killed $x0, 88, implicit-def $vl, implicit-def $vtype, implicit $vl
; CHECK-NEXT: BLTU [[ADDI]], [[COPY1]], %bb.1
; CHECK-NEXT: PseudoBR %bb.2
; CHECK-NEXT: {{ $}}
@@ -599,3 +626,112 @@ body: |
PseudoRET
...
+---
+name: redusum_loop
+alignment: 4
+tracksRegLiveness: true
+registers:
+ - { id: 0, class: gpr }
+ - { id: 1, class: gpr }
+ - { id: 2, class: vr }
+ - { id: 3, class: vr }
+ - { id: 4, class: gpr }
+ - { id: 5, class: gpr }
+ - { id: 6, class: gpr }
+ - { id: 7, class: gpr }
+ - { id: 8, class: gpr }
+ - { id: 9, class: gpr }
+ - { id: 10, class: vr }
+ - { id: 11, class: vr }
+ - { id: 12, class: vr }
+ - { id: 13, class: gpr }
+ - { id: 14, class: vr }
+ - { id: 15, class: vr }
+ - { id: 16, class: vr }
+ - { id: 17, class: vr }
+ - { id: 18, class: gpr }
+ - { id: 19, class: gpr }
+ - { id: 20, class: vr }
+ - { id: 21, class: vr }
+ - { id: 22, class: vr }
+ - { id: 23, class: vr }
+ - { id: 24, class: vr }
+liveins:
+ - { reg: '$x10', virtual-reg: '%6' }
+ - { reg: '$x12', virtual-reg: '%8' }
+frameInfo:
+ maxAlignment: 1
+machineFunctionInfo: {}
+body: |
+ ; CHECK-LABEL: name: redusum_loop
+ ; CHECK: bb.0.entry:
+ ; CHECK-NEXT: successors: %bb.1(0x80000000)
+ ; CHECK-NEXT: liveins: $x10, $x12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:gpr = COPY $x12
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:gpr = COPY $x10
+ ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 4, 80, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: [[PseudoVMV_V_I_M1_:%[0-9]+]]:vr = PseudoVMV_V_I_M1 0, 4, 5, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:vr = COPY [[PseudoVMV_V_I_M1_]]
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:vr = COPY [[COPY2]]
+ ; CHECK-NEXT: [[LUI:%[0-9]+]]:gpr = LUI 1
+ ; CHECK-NEXT: [[ADDIW:%[0-9]+]]:gpr = ADDIW killed [[LUI]], -2048
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1.vector.body:
+ ; CHECK-NEXT: successors: %bb.2(0x04000000), %bb.1(0x7c000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:gpr = PHI [[COPY1]], %bb.0, %5, %bb.1
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gpr = PHI [[ADDIW]], %bb.0, %4, %bb.1
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:vr = PHI [[COPY3]], %bb.0, %16, %bb.1
+ ; CHECK-NEXT: [[PseudoVLE32_V_M1_:%[0-9]+]]:vr = PseudoVLE32_V_M1 [[PHI]], 4, 5, implicit $vl, implicit $vtype :: (load (s128) from %ir.lsr.iv12, align 4)
+ ; CHECK-NEXT: [[PseudoVADD_VV_M1_:%[0-9]+]]:vr = PseudoVADD_VV_M1 killed [[PseudoVLE32_V_M1_]], [[PHI2]], 4, 5, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gpr = nsw ADDI [[PHI1]], -4
+ ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gpr = ADDI [[PHI]], 16
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gpr = COPY $x0
+ ; CHECK-NEXT: BNE [[ADDI]], [[COPY4]], %bb.1
+ ; CHECK-NEXT: PseudoBR %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2.middle.block:
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gpr = COPY $x0
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[PseudoVMV_S_X_M1_:%[0-9]+]]:vr = PseudoVMV_S_X_M1 [[DEF]], [[COPY5]], 1, 5, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:vr = IMPLICIT_DEF
+ ; CHECK-NEXT: [[PseudoVREDSUM_VS_M1_:%[0-9]+]]:vr = PseudoVREDSUM_VS_M1 [[DEF1]], [[PseudoVADD_VV_M1_]], killed [[PseudoVMV_S_X_M1_]], 4, 5, implicit $vl, implicit $vtype
+ ; CHECK-NEXT: dead $x0 = PseudoVSETIVLI 1, 80, implicit-def $vl, implicit-def $vtype
+ ; CHECK-NEXT: PseudoVSE32_V_M1 killed [[PseudoVREDSUM_VS_M1_]], [[COPY]], 1, 5, implicit $vl, implicit $vtype :: (store (s32) into %ir.res)
+ ; CHECK-NEXT: PseudoRET
+ bb.0.entry:
+ liveins: $x10, $x12
+
+ %8:gpr = COPY $x12
+ %6:gpr = COPY $x10
+ %11:vr = PseudoVMV_V_I_M1 0, 4, 5
+ %12:vr = COPY %11
+ %10:vr = COPY %12
+ %13:gpr = LUI 1
+ %9:gpr = ADDIW killed %13, -2048
+
+ bb.1.vector.body:
+ successors: %bb.2(0x04000000), %bb.1(0x7c000000)
+
+ %0:gpr = PHI %6, %bb.0, %5, %bb.1
+ %1:gpr = PHI %9, %bb.0, %4, %bb.1
+ %2:vr = PHI %10, %bb.0, %16, %bb.1
+ %14:vr = PseudoVLE32_V_M1 %0, 4, 5 :: (load (s128) from %ir.lsr.iv12, align 4)
+ %16:vr = PseudoVADD_VV_M1 killed %14, %2, 4, 5
+ %4:gpr = nsw ADDI %1, -4
+ %5:gpr = ADDI %0, 16
+ %18:gpr = COPY $x0
+ BNE %4, %18, %bb.1
+ PseudoBR %bb.2
+
+ bb.2.middle.block:
+ %19:gpr = COPY $x0
+ %21:vr = IMPLICIT_DEF
+ %20:vr = PseudoVMV_S_X_M1 %21, %19, 1, 5
+ %24:vr = IMPLICIT_DEF
+ %23:vr = PseudoVREDSUM_VS_M1 %24, %16, killed %20, 4, 5
+ PseudoVSE32_V_M1 killed %23, %8, 1, 5 :: (store (s32) into %ir.res)
+ PseudoRET
+
+...
diff --git a/llvm/test/CodeGen/RISCV/shrinkwrap.ll b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
index e64cc5273e56..d41269c841e8 100644
--- a/llvm/test/CodeGen/RISCV/shrinkwrap.ll
+++ b/llvm/test/CodeGen/RISCV/shrinkwrap.ll
@@ -26,35 +26,39 @@ define void @eliminate_restore(i32 %n) nounwind {
;
; RV32I-SW-LABEL: eliminate_restore:
; RV32I-SW: # %bb.0:
+; RV32I-SW-NEXT: addi sp, sp, -16
+; RV32I-SW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-SW-NEXT: li a1, 32
; RV32I-SW-NEXT: bgeu a1, a0, .LBB0_2
; RV32I-SW-NEXT: # %bb.1: # %if.end
+; RV32I-SW-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
+; RV32I-SW-NEXT: addi sp, sp, 16
; RV32I-SW-NEXT: ret
; RV32I-SW-NEXT: .LBB0_2: # %if.then
-; RV32I-SW-NEXT: addi sp, sp, -16
-; RV32I-SW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-SW-NEXT: call abort@plt
;
; RV32I-SW-SR-LABEL: eliminate_restore:
; RV32I-SW-SR: # %bb.0:
+; RV32I-SW-SR-NEXT: call t0, __riscv_save_0
; RV32I-SW-SR-NEXT: li a1, 32
; RV32I-SW-SR-NEXT: bgeu a1, a0, .LBB0_2
; RV32I-SW-SR-NEXT: # %bb.1: # %if.end
-; RV32I-SW-SR-NEXT: ret
+; RV32I-SW-SR-NEXT: tail __riscv_restore_0
; RV32I-SW-SR-NEXT: .LBB0_2: # %if.then
-; RV32I-SW-SR-NEXT: call t0, __riscv_save_0
; RV32I-SW-SR-NEXT: call abort@plt
;
; RV64I-SW-LABEL: eliminate_restore:
; RV64I-SW: # %bb.0:
+; RV64I-SW-NEXT: addi sp, sp, -16
+; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-SW-NEXT: sext.w a0, a0
; RV64I-SW-NEXT: li a1, 32
; RV64I-SW-NEXT: bgeu a1, a0, .LBB0_2
; RV64I-SW-NEXT: # %bb.1: # %if.end
+; RV64I-SW-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
+; RV64I-SW-NEXT: addi sp, sp, 16
; RV64I-SW-NEXT: ret
; RV64I-SW-NEXT: .LBB0_2: # %if.then
-; RV64I-SW-NEXT: addi sp, sp, -16
-; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-SW-NEXT: call abort@plt
%cmp = icmp ule i32 %n, 32
br i1 %cmp, label %if.then, label %if.end
@@ -93,52 +97,51 @@ define void @conditional_alloca(i32 %n) nounwind {
;
; RV32I-SW-LABEL: conditional_alloca:
; RV32I-SW: # %bb.0:
-; RV32I-SW-NEXT: li a1, 32
-; RV32I-SW-NEXT: bltu a1, a0, .LBB1_2
-; RV32I-SW-NEXT: # %bb.1: # %if.then
; RV32I-SW-NEXT: addi sp, sp, -16
; RV32I-SW-NEXT: sw ra, 12(sp) # 4-byte Folded Spill
; RV32I-SW-NEXT: sw s0, 8(sp) # 4-byte Folded Spill
; RV32I-SW-NEXT: addi s0, sp, 16
+; RV32I-SW-NEXT: li a1, 32
+; RV32I-SW-NEXT: bltu a1, a0, .LBB1_2
+; RV32I-SW-NEXT: # %bb.1: # %if.then
; RV32I-SW-NEXT: addi a0, a0, 15
; RV32I-SW-NEXT: andi a0, a0, -16
; RV32I-SW-NEXT: sub a0, sp, a0
; RV32I-SW-NEXT: mv sp, a0
; RV32I-SW-NEXT: call notdead@plt
+; RV32I-SW-NEXT: .LBB1_2: # %if.end
; RV32I-SW-NEXT: addi sp, s0, -16
; RV32I-SW-NEXT: lw ra, 12(sp) # 4-byte Folded Reload
; RV32I-SW-NEXT: lw s0, 8(sp) # 4-byte Folded Reload
; RV32I-SW-NEXT: addi sp, sp, 16
-; RV32I-SW-NEXT: .LBB1_2: # %if.end
; RV32I-SW-NEXT: ret
;
; RV32I-SW-SR-LABEL: conditional_alloca:
; RV32I-SW-SR: # %bb.0:
+; RV32I-SW-SR-NEXT: call t0, __riscv_save_1
+; RV32I-SW-SR-NEXT: addi s0, sp, 16
; RV32I-SW-SR-NEXT: li a1, 32
; RV32I-SW-SR-NEXT: bltu a1, a0, .LBB1_2
; RV32I-SW-SR-NEXT: # %bb.1: # %if.then
-; RV32I-SW-SR-NEXT: call t0, __riscv_save_1
-; RV32I-SW-SR-NEXT: addi s0, sp, 16
; RV32I-SW-SR-NEXT: addi a0, a0, 15
; RV32I-SW-SR-NEXT: andi a0, a0, -16
; RV32I-SW-SR-NEXT: sub a0, sp, a0
; RV32I-SW-SR-NEXT: mv sp, a0
; RV32I-SW-SR-NEXT: call notdead@plt
+; RV32I-SW-SR-NEXT: .LBB1_2: # %if.end
; RV32I-SW-SR-NEXT: addi sp, s0, -16
; RV32I-SW-SR-NEXT: tail __riscv_restore_1
-; RV32I-SW-SR-NEXT: .LBB1_2: # %if.end
-; RV32I-SW-SR-NEXT: ret
;
; RV64I-SW-LABEL: conditional_alloca:
; RV64I-SW: # %bb.0:
-; RV64I-SW-NEXT: sext.w a1, a0
-; RV64I-SW-NEXT: li a2, 32
-; RV64I-SW-NEXT: bltu a2, a1, .LBB1_2
-; RV64I-SW-NEXT: # %bb.1: # %if.then
; RV64I-SW-NEXT: addi sp, sp, -16
; RV64I-SW-NEXT: sd ra, 8(sp) # 8-byte Folded Spill
; RV64I-SW-NEXT: sd s0, 0(sp) # 8-byte Folded Spill
; RV64I-SW-NEXT: addi s0, sp, 16
+; RV64I-SW-NEXT: sext.w a1, a0
+; RV64I-SW-NEXT: li a2, 32
+; RV64I-SW-NEXT: bltu a2, a1, .LBB1_2
+; RV64I-SW-NEXT: # %bb.1: # %if.then
; RV64I-SW-NEXT: slli a0, a0, 32
; RV64I-SW-NEXT: srli a0, a0, 32
; RV64I-SW-NEXT: addi a0, a0, 15
@@ -146,11 +149,11 @@ define void @conditional_alloca(i32 %n) nounwind {
; RV64I-SW-NEXT: sub a0, sp, a0
; RV64I-SW-NEXT: mv sp, a0
; RV64I-SW-NEXT: call notdead@plt
+; RV64I-SW-NEXT: .LBB1_2: # %if.end
; RV64I-SW-NEXT: addi sp, s0, -16
; RV64I-SW-NEXT: ld ra, 8(sp) # 8-byte Folded Reload
; RV64I-SW-NEXT: ld s0, 0(sp) # 8-byte Folded Reload
; RV64I-SW-NEXT: addi sp, sp, 16
-; RV64I-SW-NEXT: .LBB1_2: # %if.end
; RV64I-SW-NEXT: ret
%cmp = icmp ule i32 %n, 32
br i1 %cmp, label %if.then, label %if.end
diff --git a/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll b/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll
new file mode 100644
index 000000000000..91aec53c4721
--- /dev/null
+++ b/llvm/test/CodeGen/RISCV/unroll-loop-cse.ll
@@ -0,0 +1,84 @@
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -mtriple=riscv32 -verify-machineinstrs < %s | FileCheck %s
+; RUN: llc -mtriple=riscv64 -verify-machineinstrs < %s | FileCheck %s
+
+@x = global [6 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5], align 4
+@check = global [6 x i32] [i32 0, i32 1, i32 2, i32 3, i32 4, i32 5], align 4
+
+; This test case checks whether the base address of an array is repeatedly
+; rematerialised within an unrolled loop.
+define signext i32 @unroll_loop_cse() {
+; CHECK-LABEL: unroll_loop_cse:
+; CHECK: # %bb.0:
+; CHECK-NEXT: lui a1, %hi(x)
+; CHECK-NEXT: lw a3, %lo(x)(a1)
+; CHECK-NEXT: lui a2, %hi(check)
+; CHECK-NEXT: lw a4, %lo(check)(a2)
+; CHECK-NEXT: li a0, 1
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.1:
+; CHECK-NEXT: addi a1, a1, %lo(x)
+; CHECK-NEXT: lw a3, 4(a1)
+; CHECK-NEXT: addi a2, a2, %lo(check)
+; CHECK-NEXT: lw a4, 4(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.2:
+; CHECK-NEXT: lw a3, 8(a1)
+; CHECK-NEXT: lw a4, 8(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.3:
+; CHECK-NEXT: lw a3, 12(a1)
+; CHECK-NEXT: lw a4, 12(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: lw a3, 16(a1)
+; CHECK-NEXT: lw a4, 16(a2)
+; CHECK-NEXT: bne a3, a4, .LBB0_6
+; CHECK-NEXT: # %bb.5:
+; CHECK-NEXT: lw a0, 20(a1)
+; CHECK-NEXT: lw a1, 20(a2)
+; CHECK-NEXT: xor a0, a0, a1
+; CHECK-NEXT: snez a0, a0
+; CHECK-NEXT: .LBB0_6:
+; CHECK-NEXT: ret
+ %1 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 0), align 4
+ %2 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 0), align 4
+ %3 = icmp eq i32 %1, %2
+ br i1 %3, label %4, label %25
+
+4:
+ %5 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 1), align 4
+ %6 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 1), align 4
+ %7 = icmp eq i32 %5, %6
+ br i1 %7, label %8, label %25
+
+8:
+ %9 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 2), align 4
+ %10 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 2), align 4
+ %11 = icmp eq i32 %9, %10
+ br i1 %11, label %12, label %25
+
+12:
+ %13 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 3), align 4
+ %14 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 3), align 4
+ %15 = icmp eq i32 %13, %14
+ br i1 %15, label %16, label %25
+
+16:
+ %17 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 4), align 4
+ %18 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 4), align 4
+ %19 = icmp eq i32 %17, %18
+ br i1 %19, label %20, label %25
+
+20:
+ %21 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @x, i64 0, i64 5), align 4
+ %22 = load i32, i32* getelementptr inbounds ([6 x i32], [6 x i32]* @check, i64 0, i64 5), align 4
+ %23 = icmp ne i32 %21, %22
+ %24 = zext i1 %23 to i32
+ br label %25
+
+25:
+ %26 = phi i32 [ 1, %0 ], [ 1, %4 ], [ 1, %8 ], [ 1, %12 ], [ 1, %16 ], [ %24, %20 ]
+ ret i32 %26
+}
+
diff --git a/llvm/test/CodeGen/X86/avg.ll b/llvm/test/CodeGen/X86/avg.ll
index 002aeab665f6..22171ecc2eab 100644
--- a/llvm/test/CodeGen/X86/avg.ll
+++ b/llvm/test/CodeGen/X86/avg.ll
@@ -64,17 +64,31 @@ define void @avg_v8i8(<8 x i8>* %a, <8 x i8>* %b) nounwind {
define void @avg_v16i8(<16 x i8>* %a, <16 x i8>* %b) nounwind {
; SSE2-LABEL: avg_v16i8:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: pavgb (%rdi), %xmm0
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: pavgb (%rsi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
-; AVX-LABEL: avg_v16i8:
-; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa (%rsi), %xmm0
-; AVX-NEXT: vpavgb (%rdi), %xmm0, %xmm0
-; AVX-NEXT: vmovdqu %xmm0, (%rax)
-; AVX-NEXT: retq
+; AVX1-LABEL: avg_v16i8:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vmovdqu %xmm0, (%rax)
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: avg_v16i8:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa (%rdi), %xmm0
+; AVX2-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX2-NEXT: vmovdqu %xmm0, (%rax)
+; AVX2-NEXT: retq
+;
+; AVX512-LABEL: avg_v16i8:
+; AVX512: # %bb.0:
+; AVX512-NEXT: vmovdqa (%rsi), %xmm0
+; AVX512-NEXT: vpavgb (%rdi), %xmm0, %xmm0
+; AVX512-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512-NEXT: retq
%1 = load <16 x i8>, <16 x i8>* %a
%2 = load <16 x i8>, <16 x i8>* %b
%3 = zext <16 x i8> %1 to <16 x i32>
@@ -152,26 +166,26 @@ define void @avg_v32i8(<32 x i8>* %a, <32 x i8>* %b) nounwind {
;
; AVX1-LABEL: avg_v32i8:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vpavgb (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgb 16(%rdi), %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vpavgb (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgb 16(%rsi), %xmm1, %xmm1
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v32i8:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vpavgb (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
;
; AVX512-LABEL: avg_v32i8:
; AVX512: # %bb.0:
-; AVX512-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512-NEXT: vpavgb (%rdi), %ymm0, %ymm0
+; AVX512-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512-NEXT: vpavgb (%rsi), %ymm0, %ymm0
; AVX512-NEXT: vmovdqu %ymm0, (%rax)
; AVX512-NEXT: vzeroupper
; AVX512-NEXT: retq
@@ -302,10 +316,10 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
;
; AVX512F-LABEL: avg_v64i8:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vpavgb (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vpavgb 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT: vpavgb (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vpavgb 32(%rsi), %ymm1, %ymm1
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vzeroupper
@@ -313,8 +327,8 @@ define void @avg_v64i8(<64 x i8>* %a, <64 x i8>* %b) nounwind {
;
; AVX512BW-LABEL: avg_v64i8:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT: vpavgb (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpavgb (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -361,15 +375,15 @@ define void @avg_v4i16(<4 x i16>* %a, <4 x i16>* %b) nounwind {
define void @avg_v8i16(<8 x i16>* %a, <8 x i16>* %b) nounwind {
; SSE2-LABEL: avg_v8i16:
; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa (%rsi), %xmm0
-; SSE2-NEXT: pavgw (%rdi), %xmm0
+; SSE2-NEXT: movdqa (%rdi), %xmm0
+; SSE2-NEXT: pavgw (%rsi), %xmm0
; SSE2-NEXT: movdqu %xmm0, (%rax)
; SSE2-NEXT: retq
;
; AVX-LABEL: avg_v8i16:
; AVX: # %bb.0:
-; AVX-NEXT: vmovdqa (%rsi), %xmm0
-; AVX-NEXT: vpavgw (%rdi), %xmm0, %xmm0
+; AVX-NEXT: vmovdqa (%rdi), %xmm0
+; AVX-NEXT: vpavgw (%rsi), %xmm0, %xmm0
; AVX-NEXT: vmovdqu %xmm0, (%rax)
; AVX-NEXT: retq
%1 = load <8 x i16>, <8 x i16>* %a
@@ -397,18 +411,18 @@ define void @avg_v16i16(<16 x i16>* %a, <16 x i16>* %b) nounwind {
;
; AVX1-LABEL: avg_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa (%rsi), %xmm0
-; AVX1-NEXT: vmovdqa 16(%rsi), %xmm1
-; AVX1-NEXT: vpavgw (%rdi), %xmm0, %xmm0
-; AVX1-NEXT: vpavgw 16(%rdi), %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa (%rdi), %xmm0
+; AVX1-NEXT: vmovdqa 16(%rdi), %xmm1
+; AVX1-NEXT: vpavgw (%rsi), %xmm0, %xmm0
+; AVX1-NEXT: vpavgw 16(%rsi), %xmm1, %xmm1
; AVX1-NEXT: vmovdqu %xmm1, (%rax)
; AVX1-NEXT: vmovdqu %xmm0, (%rax)
; AVX1-NEXT: retq
;
; AVX2-LABEL: avg_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa (%rsi), %ymm0
-; AVX2-NEXT: vpavgw (%rdi), %ymm0, %ymm0
+; AVX2-NEXT: vmovdqa (%rdi), %ymm0
+; AVX2-NEXT: vpavgw (%rsi), %ymm0, %ymm0
; AVX2-NEXT: vmovdqu %ymm0, (%rax)
; AVX2-NEXT: vzeroupper
; AVX2-NEXT: retq
@@ -478,10 +492,10 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
;
; AVX512F-LABEL: avg_v32i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
-; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0
-; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa (%rdi), %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rdi), %ymm1
+; AVX512F-NEXT: vpavgw (%rsi), %ymm0, %ymm0
+; AVX512F-NEXT: vpavgw 32(%rsi), %ymm1, %ymm1
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
; AVX512F-NEXT: vzeroupper
@@ -489,8 +503,8 @@ define void @avg_v32i16(<32 x i16>* %a, <32 x i16>* %b) nounwind {
;
; AVX512BW-LABEL: avg_v32i16:
; AVX512BW: # %bb.0:
-; AVX512BW-NEXT: vmovdqa64 (%rsi), %zmm0
-; AVX512BW-NEXT: vpavgw (%rdi), %zmm0, %zmm0
+; AVX512BW-NEXT: vmovdqa64 (%rdi), %zmm0
+; AVX512BW-NEXT: vpavgw (%rsi), %zmm0, %zmm0
; AVX512BW-NEXT: vmovdqu64 %zmm0, (%rax)
; AVX512BW-NEXT: vzeroupper
; AVX512BW-NEXT: retq
@@ -561,15 +575,15 @@ define void @avg_v40i16(<40 x i16>* %a, <40 x i16>* %b) nounwind {
;
; AVX512F-LABEL: avg_v40i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa 64(%rsi), %xmm0
-; AVX512F-NEXT: vpavgw 64(%rdi), %xmm0, %xmm0
-; AVX512F-NEXT: vmovdqa (%rsi), %ymm1
-; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm2
-; AVX512F-NEXT: vpavgw (%rdi), %ymm1, %ymm1
-; AVX512F-NEXT: vpavgw 32(%rdi), %ymm2, %ymm2
-; AVX512F-NEXT: vmovdqu %ymm2, (%rax)
+; AVX512F-NEXT: vmovdqa (%rsi), %ymm0
+; AVX512F-NEXT: vmovdqa 32(%rsi), %ymm1
+; AVX512F-NEXT: vpavgw (%rdi), %ymm0, %ymm0
+; AVX512F-NEXT: vpavgw 32(%rdi), %ymm1, %ymm1
+; AVX512F-NEXT: vmovdqa 64(%rsi), %xmm2
+; AVX512F-NEXT: vpavgw 64(%rdi), %xmm2, %xmm2
; AVX512F-NEXT: vmovdqu %ymm1, (%rax)
-; AVX512F-NEXT: vmovdqu %xmm0, (%rax)
+; AVX512F-NEXT: vmovdqu %ymm0, (%rax)
+; AVX512F-NEXT: vmovdqu %xmm2, (%rax)
; AVX512F-NEXT: vzeroupper
; AVX512F-NEXT: retq
;
@@ -2645,7 +2659,7 @@ define <8 x i16> @PR52131_pavg_chain(<8 x i16> %a, <8 x i16> %b, <8 x i16> %c) {
;
; AVX-LABEL: PR52131_pavg_chain:
; AVX: # %bb.0:
-; AVX-NEXT: vpavgw %xmm0, %xmm1, %xmm0
+; AVX-NEXT: vpavgw %xmm1, %xmm0, %xmm0
; AVX-NEXT: vpavgw %xmm2, %xmm0, %xmm0
; AVX-NEXT: retq
%i = zext <8 x i16> %a to <8 x i32>
diff --git a/llvm/test/CodeGen/X86/avx512-insert-extract.ll b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
index 4ae0d273daae..78bb6d5f1a63 100644
--- a/llvm/test/CodeGen/X86/avx512-insert-extract.ll
+++ b/llvm/test/CodeGen/X86/avx512-insert-extract.ll
@@ -668,7 +668,6 @@ define <32 x i16> @insert_v32i16(<32 x i16> %x, i16 %y, i16* %ptr) {
; KNL-NEXT: vinserti32x4 $0, %xmm1, %zmm0, %zmm1
; KNL-NEXT: vmovd %edi, %xmm0
; KNL-NEXT: vpbroadcastw %xmm0, %ymm0
-; KNL-NEXT: vinserti64x4 $1, %ymm0, %zmm0, %zmm0
; KNL-NEXT: vpternlogq $216, {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %zmm1, %zmm0
; KNL-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/combine-pavg.ll b/llvm/test/CodeGen/X86/combine-pavg.ll
index c6e1c242565b..bfeebb473c74 100644
--- a/llvm/test/CodeGen/X86/combine-pavg.ll
+++ b/llvm/test/CodeGen/X86/combine-pavg.ll
@@ -1,7 +1,7 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefix=SSE
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefixes=AVX,AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=AVX,AVX2
declare <16 x i8> @llvm.x86.sse2.pavg.b(<16 x i8>, <16 x i8>) nounwind readnone
declare <8 x i16> @llvm.x86.sse2.pavg.w(<8 x i16>, <8 x i16>) nounwind readnone
@@ -37,6 +37,36 @@ define <16 x i8> @combine_pavgw_knownbits(<8 x i16> %a0, <8 x i16> %a1, <8 x i16
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: packuswb %xmm3, %xmm0
; SSE-NEXT: retq
+;
+; AVX1-LABEL: combine_pavgw_knownbits:
+; AVX1: # %bb.0:
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
+; AVX1-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX1-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpand %xmm4, %xmm2, %xmm1
+; AVX1-NEXT: vpand %xmm4, %xmm3, %xmm2
+; AVX1-NEXT: vpavgw %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm2 = [255,0,255,0,255,0,255,0,255,0,255,0,255,0,255,0]
+; AVX1-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX1-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: retq
+;
+; AVX2-LABEL: combine_pavgw_knownbits:
+; AVX2: # %bb.0:
+; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = [31,31,31,31,31,31,31,31]
+; AVX2-NEXT: vpand %xmm4, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm4, %xmm1, %xmm1
+; AVX2-NEXT: vpavgw %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: vpand %xmm4, %xmm2, %xmm1
+; AVX2-NEXT: vpand %xmm4, %xmm3, %xmm2
+; AVX2-NEXT: vpavgw %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpbroadcastw {{.*#+}} xmm2 = [255,255,255,255,255,255,255,255]
+; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
+; AVX2-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX2-NEXT: vpackuswb %xmm1, %xmm0, %xmm0
+; AVX2-NEXT: retq
%m0 = and <8 x i16> %a0, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
%m1 = and <8 x i16> %a1, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
%m2 = and <8 x i16> %a2, <i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31, i16 31>
diff --git a/llvm/test/CodeGen/X86/combine-rotates.ll b/llvm/test/CodeGen/X86/combine-rotates.ll
index ed9581d71177..eade7a8b126e 100644
--- a/llvm/test/CodeGen/X86/combine-rotates.ll
+++ b/llvm/test/CodeGen/X86/combine-rotates.ll
@@ -386,10 +386,8 @@ define <4 x i32> @rotl_binop_shuffle(<4 x i32>, <4 x i32>) {
define <4 x i32> @rotr_binop_shuffle(<4 x i32>, <4 x i32>) {
; SSE2-LABEL: rotr_binop_shuffle:
; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psllq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: psllq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/combine-udiv.ll b/llvm/test/CodeGen/X86/combine-udiv.ll
index 2906fe6da47b..f657430239b8 100644
--- a/llvm/test/CodeGen/X86/combine-udiv.ll
+++ b/llvm/test/CodeGen/X86/combine-udiv.ll
@@ -638,10 +638,7 @@ define <16 x i8> @combine_vec_udiv_nonuniform4(<16 x i8> %x) {
; SSE2-NEXT: pxor %xmm3, %xmm3
; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0],xmm3[0],xmm0[1],xmm3[1],xmm0[2],xmm3[2],xmm0[3],xmm3[3],xmm0[4],xmm3[4],xmm0[5],xmm3[5],xmm0[6],xmm3[6],xmm0[7],xmm3[7]
; SSE2-NEXT: pmullw {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm0, %xmm0
-; SSE2-NEXT: psrlw $7, %xmm0
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE2-NEXT: psrlw $15, %xmm0
; SSE2-NEXT: pandn %xmm0, %xmm1
; SSE2-NEXT: por %xmm2, %xmm1
; SSE2-NEXT: movdqa %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/extractelement-load.ll b/llvm/test/CodeGen/X86/extractelement-load.ll
index 2a7ed3a8b4e7..138d60b05ba9 100644
--- a/llvm/test/CodeGen/X86/extractelement-load.ll
+++ b/llvm/test/CodeGen/X86/extractelement-load.ll
@@ -1,8 +1,8 @@
; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=X32-SSE2
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+ssse3 | FileCheck %s --check-prefixes=X64,X64-SSSE3
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX
-; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=X64,X64-AVX,X64-AVX2
target datalayout = "e-m:e-i64:64-f80:128-n8:16:32:64-S128"
@@ -268,7 +268,7 @@ entry:
}
; Test for bad extractions from a VBROADCAST_LOAD of the <2 x i16> non-uniform constant bitcast as <4 x i32>.
-define void @subextract_broadcast_load_constant(<2 x i16>* nocapture %0, i16* nocapture %1, i16* nocapture %2) {
+define void @subextract_broadcast_load_constant(<2 x i16>* nocapture %0, i16* nocapture %1, i16* nocapture %2) nounwind {
; X32-SSE2-LABEL: subextract_broadcast_load_constant:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
@@ -301,7 +301,7 @@ define void @subextract_broadcast_load_constant(<2 x i16>* nocapture %0, i16* no
ret void
}
-define i32 @multi_use_load_scalarization(<4 x i32>* %p) {
+define i32 @multi_use_load_scalarization(<4 x i32>* %p) nounwind {
; X32-SSE2-LABEL: multi_use_load_scalarization:
; X32-SSE2: # %bb.0:
; X32-SSE2-NEXT: movl {{[0-9]+}}(%esp), %ecx
@@ -335,3 +335,146 @@ define i32 @multi_use_load_scalarization(<4 x i32>* %p) {
%r = extractelement <4 x i32> %v, i64 0
ret i32 %r
}
+
+@n1 = local_unnamed_addr global <8 x i32> <i32 0, i32 42, i32 6, i32 0, i32 0, i32 0, i32 0, i32 0>, align 32
+@zero = internal unnamed_addr global <8 x i32> zeroinitializer, align 32
+
+define i32 @main() nounwind {
+; X32-SSE2-LABEL: main:
+; X32-SSE2: # %bb.0:
+; X32-SSE2-NEXT: pushl %ebp
+; X32-SSE2-NEXT: movl %esp, %ebp
+; X32-SSE2-NEXT: pushl %esi
+; X32-SSE2-NEXT: andl $-32, %esp
+; X32-SSE2-NEXT: subl $64, %esp
+; X32-SSE2-NEXT: movdqa zero, %xmm0
+; X32-SSE2-NEXT: movaps n1+16, %xmm1
+; X32-SSE2-NEXT: movaps n1, %xmm2
+; X32-SSE2-NEXT: movaps %xmm2, zero
+; X32-SSE2-NEXT: movaps %xmm1, zero+16
+; X32-SSE2-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
+; X32-SSE2-NEXT: movaps %xmm1, {{[0-9]+}}(%esp)
+; X32-SSE2-NEXT: movaps %xmm1, (%esp)
+; X32-SSE2-NEXT: movdqa (%esp), %xmm1
+; X32-SSE2-NEXT: movaps {{[0-9]+}}(%esp), %xmm2
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; X32-SSE2-NEXT: movd %xmm2, %eax
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; X32-SSE2-NEXT: movd %xmm2, %ecx
+; X32-SSE2-NEXT: xorl %edx, %edx
+; X32-SSE2-NEXT: divl %ecx
+; X32-SSE2-NEXT: movl %eax, %ecx
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X32-SSE2-NEXT: movd %xmm0, %eax
+; X32-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; X32-SSE2-NEXT: movd %xmm0, %esi
+; X32-SSE2-NEXT: xorl %edx, %edx
+; X32-SSE2-NEXT: divl %esi
+; X32-SSE2-NEXT: addl %ecx, %eax
+; X32-SSE2-NEXT: leal -4(%ebp), %esp
+; X32-SSE2-NEXT: popl %esi
+; X32-SSE2-NEXT: popl %ebp
+; X32-SSE2-NEXT: retl
+;
+; X64-SSSE3-LABEL: main:
+; X64-SSSE3: # %bb.0:
+; X64-SSSE3-NEXT: pushq %rbp
+; X64-SSSE3-NEXT: movq %rsp, %rbp
+; X64-SSSE3-NEXT: andq $-32, %rsp
+; X64-SSSE3-NEXT: subq $64, %rsp
+; X64-SSSE3-NEXT: movdqa zero(%rip), %xmm0
+; X64-SSSE3-NEXT: movq n1@GOTPCREL(%rip), %rax
+; X64-SSSE3-NEXT: movaps (%rax), %xmm1
+; X64-SSSE3-NEXT: movaps 16(%rax), %xmm2
+; X64-SSSE3-NEXT: movaps %xmm1, zero(%rip)
+; X64-SSSE3-NEXT: movaps %xmm2, zero+16(%rip)
+; X64-SSSE3-NEXT: movaps {{.*#+}} xmm1 = [2,2,2,2]
+; X64-SSSE3-NEXT: movaps %xmm1, {{[0-9]+}}(%rsp)
+; X64-SSSE3-NEXT: movaps %xmm1, (%rsp)
+; X64-SSSE3-NEXT: movdqa (%rsp), %xmm1
+; X64-SSSE3-NEXT: movaps {{[0-9]+}}(%rsp), %xmm2
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,3,2,3]
+; X64-SSSE3-NEXT: movd %xmm2, %eax
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
+; X64-SSSE3-NEXT: movd %xmm2, %ecx
+; X64-SSSE3-NEXT: xorl %edx, %edx
+; X64-SSSE3-NEXT: divl %ecx
+; X64-SSSE3-NEXT: movl %eax, %ecx
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm0[1,1,1,1]
+; X64-SSSE3-NEXT: movd %xmm0, %eax
+; X64-SSSE3-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
+; X64-SSSE3-NEXT: movd %xmm0, %esi
+; X64-SSSE3-NEXT: xorl %edx, %edx
+; X64-SSSE3-NEXT: divl %esi
+; X64-SSSE3-NEXT: addl %ecx, %eax
+; X64-SSSE3-NEXT: movq %rbp, %rsp
+; X64-SSSE3-NEXT: popq %rbp
+; X64-SSSE3-NEXT: retq
+;
+; X64-AVX1-LABEL: main:
+; X64-AVX1: # %bb.0:
+; X64-AVX1-NEXT: pushq %rbp
+; X64-AVX1-NEXT: movq %rsp, %rbp
+; X64-AVX1-NEXT: andq $-32, %rsp
+; X64-AVX1-NEXT: subq $64, %rsp
+; X64-AVX1-NEXT: movq n1@GOTPCREL(%rip), %rax
+; X64-AVX1-NEXT: vmovaps (%rax), %ymm0
+; X64-AVX1-NEXT: vmovaps zero(%rip), %xmm1
+; X64-AVX1-NEXT: vmovaps %ymm0, zero(%rip)
+; X64-AVX1-NEXT: vmovaps {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
+; X64-AVX1-NEXT: vmovaps %ymm0, (%rsp)
+; X64-AVX1-NEXT: vmovaps (%rsp), %ymm0
+; X64-AVX1-NEXT: vextractps $2, %xmm1, %eax
+; X64-AVX1-NEXT: vextractps $2, %xmm0, %ecx
+; X64-AVX1-NEXT: xorl %edx, %edx
+; X64-AVX1-NEXT: divl %ecx
+; X64-AVX1-NEXT: movl %eax, %ecx
+; X64-AVX1-NEXT: vextractps $1, %xmm1, %eax
+; X64-AVX1-NEXT: vextractps $1, %xmm0, %esi
+; X64-AVX1-NEXT: xorl %edx, %edx
+; X64-AVX1-NEXT: divl %esi
+; X64-AVX1-NEXT: addl %ecx, %eax
+; X64-AVX1-NEXT: movq %rbp, %rsp
+; X64-AVX1-NEXT: popq %rbp
+; X64-AVX1-NEXT: vzeroupper
+; X64-AVX1-NEXT: retq
+;
+; X64-AVX2-LABEL: main:
+; X64-AVX2: # %bb.0:
+; X64-AVX2-NEXT: pushq %rbp
+; X64-AVX2-NEXT: movq %rsp, %rbp
+; X64-AVX2-NEXT: andq $-32, %rsp
+; X64-AVX2-NEXT: subq $64, %rsp
+; X64-AVX2-NEXT: movq n1@GOTPCREL(%rip), %rax
+; X64-AVX2-NEXT: vmovaps (%rax), %ymm0
+; X64-AVX2-NEXT: vmovaps zero(%rip), %xmm1
+; X64-AVX2-NEXT: vmovaps %ymm0, zero(%rip)
+; X64-AVX2-NEXT: vbroadcastss {{.*#+}} ymm0 = [2,2,2,2,2,2,2,2]
+; X64-AVX2-NEXT: vmovaps %ymm0, (%rsp)
+; X64-AVX2-NEXT: vmovaps (%rsp), %ymm0
+; X64-AVX2-NEXT: vextractps $2, %xmm1, %eax
+; X64-AVX2-NEXT: vextractps $2, %xmm0, %ecx
+; X64-AVX2-NEXT: xorl %edx, %edx
+; X64-AVX2-NEXT: divl %ecx
+; X64-AVX2-NEXT: movl %eax, %ecx
+; X64-AVX2-NEXT: vextractps $1, %xmm1, %eax
+; X64-AVX2-NEXT: vextractps $1, %xmm0, %esi
+; X64-AVX2-NEXT: xorl %edx, %edx
+; X64-AVX2-NEXT: divl %esi
+; X64-AVX2-NEXT: addl %ecx, %eax
+; X64-AVX2-NEXT: movq %rbp, %rsp
+; X64-AVX2-NEXT: popq %rbp
+; X64-AVX2-NEXT: vzeroupper
+; X64-AVX2-NEXT: retq
+ %stackptr = alloca <8 x i32>, align 32
+ %z = load <8 x i32>, <8 x i32>* @zero, align 32
+ %t1 = load <8 x i32>, <8 x i32>* @n1, align 32
+ store <8 x i32> %t1, <8 x i32>* @zero, align 32
+ store volatile <8 x i32> <i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2, i32 2>, <8 x i32>* %stackptr, align 32
+ %stackload = load volatile <8 x i32>, <8 x i32>* %stackptr, align 32
+ %div = udiv <8 x i32> %z, %stackload
+ %e1 = extractelement <8 x i32> %div, i64 1
+ %e2 = extractelement <8 x i32> %div, i64 2
+ %r = add i32 %e1, %e2
+ ret i32 %r
+}
diff --git a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
index 2883beb6b01d..07094e2b93d0 100644
--- a/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
+++ b/llvm/test/CodeGen/X86/merge-consecutive-stores-nt.ll
@@ -1,4 +1,4 @@
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --no_x86_scrub_mem_shuffle
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=X86,X86-SSE2
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+sse4a | FileCheck %s --check-prefixes=X86,X86-SSE4A
; RUN: llc < %s -mtriple=x86_64-unknown-unknown | FileCheck %s --check-prefixes=X64-SSE,X64-SSE2
@@ -332,10 +332,10 @@ define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1) no
; X86-SSE4A: # %bb.0:
; X86-SSE4A-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE4A-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; X86-SSE4A-NEXT: movsd (%ecx), %xmm0 # xmm0 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 8(%ecx), %xmm1 # xmm1 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 16(%ecx), %xmm2 # xmm2 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 24(%ecx), %xmm3 # xmm3 = mem[0],zero
; X86-SSE4A-NEXT: movntsd %xmm0, (%eax)
; X86-SSE4A-NEXT: movntsd %xmm1, 8(%eax)
; X86-SSE4A-NEXT: movntsd %xmm3, 24(%eax)
@@ -360,10 +360,10 @@ define void @merge_2_v4f32_align1_ntstore(<4 x float>* %a0, <4 x float>* %a1) no
;
; X64-SSE4A-LABEL: merge_2_v4f32_align1_ntstore:
; X64-SSE4A: # %bb.0:
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; X64-SSE4A-NEXT: movsd (%rdi), %xmm0 # xmm0 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 8(%rdi), %xmm1 # xmm1 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 16(%rdi), %xmm2 # xmm2 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 24(%rdi), %xmm3 # xmm3 = mem[0],zero
; X64-SSE4A-NEXT: movntsd %xmm0, (%rsi)
; X64-SSE4A-NEXT: movntsd %xmm1, 8(%rsi)
; X64-SSE4A-NEXT: movntsd %xmm3, 24(%rsi)
@@ -445,10 +445,10 @@ define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1) nounwind {
; X86-SSE4A: # %bb.0:
; X86-SSE4A-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE4A-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; X86-SSE4A-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; X86-SSE4A-NEXT: movsd (%ecx), %xmm0 # xmm0 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 8(%ecx), %xmm1 # xmm1 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 16(%ecx), %xmm2 # xmm2 = mem[0],zero
+; X86-SSE4A-NEXT: movsd 24(%ecx), %xmm3 # xmm3 = mem[0],zero
; X86-SSE4A-NEXT: movntsd %xmm0, (%eax)
; X86-SSE4A-NEXT: movntsd %xmm1, 8(%eax)
; X86-SSE4A-NEXT: movntsd %xmm3, 24(%eax)
@@ -473,10 +473,10 @@ define void @merge_2_v4f32_align1(<4 x float>* %a0, <4 x float>* %a1) nounwind {
;
; X64-SSE4A-LABEL: merge_2_v4f32_align1:
; X64-SSE4A: # %bb.0:
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm0 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm1 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm2 = mem[0],zero
-; X64-SSE4A-NEXT: movsd {{.*#+}} xmm3 = mem[0],zero
+; X64-SSE4A-NEXT: movsd (%rdi), %xmm0 # xmm0 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 8(%rdi), %xmm1 # xmm1 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 16(%rdi), %xmm2 # xmm2 = mem[0],zero
+; X64-SSE4A-NEXT: movsd 24(%rdi), %xmm3 # xmm3 = mem[0],zero
; X64-SSE4A-NEXT: movntsd %xmm0, (%rsi)
; X64-SSE4A-NEXT: movntsd %xmm1, 8(%rsi)
; X64-SSE4A-NEXT: movntsd %xmm3, 24(%rsi)
diff --git a/llvm/test/CodeGen/X86/min-legal-vector-width.ll b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
index a6ccd731f186..da4439beac5a 100644
--- a/llvm/test/CodeGen/X86/min-legal-vector-width.ll
+++ b/llvm/test/CodeGen/X86/min-legal-vector-width.ll
@@ -72,8 +72,8 @@ define dso_local void @avg_v64i8_256(<64 x i8>* %a, <64 x i8>* %b) "min-legal-ve
define dso_local void @avg_v64i8_512(<64 x i8>* %a, <64 x i8>* %b) "min-legal-vector-width"="512" {
; CHECK-LABEL: avg_v64i8_512:
; CHECK: # %bb.0:
-; CHECK-NEXT: vmovdqa64 (%rsi), %zmm0
-; CHECK-NEXT: vpavgb (%rdi), %zmm0, %zmm0
+; CHECK-NEXT: vmovdqa64 (%rdi), %zmm0
+; CHECK-NEXT: vpavgb (%rsi), %zmm0, %zmm0
; CHECK-NEXT: vmovdqu64 %zmm0, (%rax)
; CHECK-NEXT: vzeroupper
; CHECK-NEXT: retq
@@ -1699,7 +1699,6 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind "mi
; CHECK: # %bb.0:
; CHECK-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; CHECK-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; CHECK-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; CHECK-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; CHECK-NEXT: vpsrlw $8, %ymm2, %ymm2
; CHECK-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
diff --git a/llvm/test/CodeGen/X86/oddshuffles.ll b/llvm/test/CodeGen/X86/oddshuffles.ll
index cb2e97e18e8a..c41fde4fa323 100644
--- a/llvm/test/CodeGen/X86/oddshuffles.ll
+++ b/llvm/test/CodeGen/X86/oddshuffles.ll
@@ -956,8 +956,7 @@ define void @interleave_24i16_out(<24 x i16>* %p, <8 x i16>* %q1, <8 x i16>* %q2
; SSE2-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE2-NEXT: pand %xmm6, %xmm2
; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
-; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE2-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
+; SSE2-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE2-NEXT: pandn %xmm3, %xmm6
; SSE2-NEXT: por %xmm2, %xmm6
; SSE2-NEXT: movups %xmm1, (%rsi)
diff --git a/llvm/test/CodeGen/X86/psubus.ll b/llvm/test/CodeGen/X86/psubus.ll
index f0cb154a3011..61bc41774008 100644
--- a/llvm/test/CodeGen/X86/psubus.ll
+++ b/llvm/test/CodeGen/X86/psubus.ll
@@ -2672,73 +2672,73 @@ define <8 x i16> @test32(<8 x i16> %a0, <8 x i32> %a1) {
define <8 x i32> @test33(<8 x i32> %a0, <8 x i64> %a1) {
; SSE2OR3-LABEL: test33:
; SSE2OR3: # %bb.0:
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
; SSE2OR3-NEXT: movdqa %xmm3, %xmm6
-; SSE2OR3-NEXT: pxor %xmm9, %xmm6
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259455,9223372039002259455]
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm7
+; SSE2OR3-NEXT: pxor %xmm8, %xmm6
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm7
; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm8, %xmm6
+; SSE2OR3-NEXT: pand %xmm10, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2OR3-NEXT: por %xmm6, %xmm7
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm10
; SSE2OR3-NEXT: pand %xmm7, %xmm3
-; SSE2OR3-NEXT: pandn %xmm8, %xmm7
+; SSE2OR3-NEXT: pxor %xmm10, %xmm7
; SSE2OR3-NEXT: por %xmm3, %xmm7
; SSE2OR3-NEXT: movdqa %xmm2, %xmm3
-; SSE2OR3-NEXT: pxor %xmm9, %xmm3
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm6
+; SSE2OR3-NEXT: pxor %xmm8, %xmm3
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm6
; SSE2OR3-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm11, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2OR3-NEXT: por %xmm3, %xmm6
; SSE2OR3-NEXT: pand %xmm6, %xmm2
-; SSE2OR3-NEXT: pandn %xmm8, %xmm6
+; SSE2OR3-NEXT: pxor %xmm10, %xmm6
; SSE2OR3-NEXT: por %xmm2, %xmm6
; SSE2OR3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2OR3-NEXT: movdqa %xmm0, %xmm2
; SSE2OR3-NEXT: psubd %xmm6, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm6
-; SSE2OR3-NEXT: pxor %xmm9, %xmm0
+; SSE2OR3-NEXT: pxor %xmm8, %xmm6
+; SSE2OR3-NEXT: pxor %xmm8, %xmm0
; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm0
; SSE2OR3-NEXT: pand %xmm2, %xmm0
; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm3
+; SSE2OR3-NEXT: pxor %xmm8, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm3
; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm3
; SSE2OR3-NEXT: pand %xmm3, %xmm5
-; SSE2OR3-NEXT: pandn %xmm8, %xmm3
+; SSE2OR3-NEXT: pxor %xmm10, %xmm3
; SSE2OR3-NEXT: por %xmm5, %xmm3
; SSE2OR3-NEXT: movdqa %xmm4, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm5
+; SSE2OR3-NEXT: pxor %xmm8, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm5
; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm5
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm5
-; SSE2OR3-NEXT: pand %xmm5, %xmm4
-; SSE2OR3-NEXT: pandn %xmm8, %xmm5
-; SSE2OR3-NEXT: por %xmm4, %xmm5
+; SSE2OR3-NEXT: pxor %xmm5, %xmm10
+; SSE2OR3-NEXT: pand %xmm4, %xmm5
+; SSE2OR3-NEXT: por %xmm10, %xmm5
; SSE2OR3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm3[0,2]
; SSE2OR3-NEXT: movdqa %xmm1, %xmm2
; SSE2OR3-NEXT: psubd %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm5
-; SSE2OR3-NEXT: pxor %xmm9, %xmm1
+; SSE2OR3-NEXT: pxor %xmm8, %xmm5
+; SSE2OR3-NEXT: pxor %xmm8, %xmm1
; SSE2OR3-NEXT: pcmpgtd %xmm5, %xmm1
; SSE2OR3-NEXT: pand %xmm2, %xmm1
; SSE2OR3-NEXT: retq
@@ -2904,73 +2904,73 @@ define <8 x i32> @test34(<8 x i32> %a0, <8 x i64> %a1) {
; SSE2OR3-NEXT: movdqa {{.*#+}} xmm6 = [1,1,1,1]
; SSE2OR3-NEXT: pand %xmm6, %xmm1
; SSE2OR3-NEXT: pand %xmm6, %xmm0
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259456,9223372039002259456]
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [9223372039002259456,9223372039002259456]
; SSE2OR3-NEXT: movdqa %xmm3, %xmm6
-; SSE2OR3-NEXT: pxor %xmm9, %xmm6
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm10 = [9223372039002259455,9223372039002259455]
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm7
+; SSE2OR3-NEXT: pxor %xmm8, %xmm6
+; SSE2OR3-NEXT: movdqa {{.*#+}} xmm9 = [9223372039002259455,9223372039002259455]
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm7
; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm7
-; SSE2OR3-NEXT: pshufd {{.*#+}} xmm8 = xmm7[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm6
+; SSE2OR3-NEXT: pshufd {{.*#+}} xmm10 = xmm7[0,0,2,2]
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
-; SSE2OR3-NEXT: pand %xmm8, %xmm6
+; SSE2OR3-NEXT: pand %xmm10, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,1,3,3]
; SSE2OR3-NEXT: por %xmm6, %xmm7
-; SSE2OR3-NEXT: movdqa {{.*#+}} xmm8 = [4294967295,4294967295]
+; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm10
; SSE2OR3-NEXT: pand %xmm7, %xmm3
-; SSE2OR3-NEXT: pandn %xmm8, %xmm7
+; SSE2OR3-NEXT: pxor %xmm10, %xmm7
; SSE2OR3-NEXT: por %xmm3, %xmm7
; SSE2OR3-NEXT: movdqa %xmm2, %xmm3
-; SSE2OR3-NEXT: pxor %xmm9, %xmm3
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm6
+; SSE2OR3-NEXT: pxor %xmm8, %xmm3
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm6
; SSE2OR3-NEXT: pcmpgtd %xmm3, %xmm6
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm11 = xmm6[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm3
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm11, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm6[1,1,3,3]
; SSE2OR3-NEXT: por %xmm3, %xmm6
; SSE2OR3-NEXT: pand %xmm6, %xmm2
-; SSE2OR3-NEXT: pandn %xmm8, %xmm6
+; SSE2OR3-NEXT: pxor %xmm10, %xmm6
; SSE2OR3-NEXT: por %xmm2, %xmm6
; SSE2OR3-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,2],xmm7[0,2]
; SSE2OR3-NEXT: movdqa %xmm0, %xmm2
; SSE2OR3-NEXT: psubd %xmm6, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm6
-; SSE2OR3-NEXT: por %xmm9, %xmm0
+; SSE2OR3-NEXT: pxor %xmm8, %xmm6
+; SSE2OR3-NEXT: por %xmm8, %xmm0
; SSE2OR3-NEXT: pcmpgtd %xmm6, %xmm0
; SSE2OR3-NEXT: pand %xmm2, %xmm0
; SSE2OR3-NEXT: movdqa %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm3
+; SSE2OR3-NEXT: pxor %xmm8, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm3
; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm3
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm3[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm3
; SSE2OR3-NEXT: pand %xmm3, %xmm5
-; SSE2OR3-NEXT: pandn %xmm8, %xmm3
+; SSE2OR3-NEXT: pxor %xmm10, %xmm3
; SSE2OR3-NEXT: por %xmm5, %xmm3
; SSE2OR3-NEXT: movdqa %xmm4, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm2
-; SSE2OR3-NEXT: movdqa %xmm10, %xmm5
+; SSE2OR3-NEXT: pxor %xmm8, %xmm2
+; SSE2OR3-NEXT: movdqa %xmm9, %xmm5
; SSE2OR3-NEXT: pcmpgtd %xmm2, %xmm5
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm6 = xmm5[0,0,2,2]
-; SSE2OR3-NEXT: pcmpeqd %xmm10, %xmm2
+; SSE2OR3-NEXT: pcmpeqd %xmm9, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2OR3-NEXT: pand %xmm6, %xmm2
; SSE2OR3-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,3,3]
; SSE2OR3-NEXT: por %xmm2, %xmm5
-; SSE2OR3-NEXT: pand %xmm5, %xmm4
-; SSE2OR3-NEXT: pandn %xmm8, %xmm5
-; SSE2OR3-NEXT: por %xmm4, %xmm5
+; SSE2OR3-NEXT: pxor %xmm5, %xmm10
+; SSE2OR3-NEXT: pand %xmm4, %xmm5
+; SSE2OR3-NEXT: por %xmm10, %xmm5
; SSE2OR3-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,2],xmm3[0,2]
; SSE2OR3-NEXT: movdqa %xmm1, %xmm2
; SSE2OR3-NEXT: psubd %xmm5, %xmm2
-; SSE2OR3-NEXT: pxor %xmm9, %xmm5
-; SSE2OR3-NEXT: por %xmm9, %xmm1
+; SSE2OR3-NEXT: pxor %xmm8, %xmm5
+; SSE2OR3-NEXT: por %xmm8, %xmm1
; SSE2OR3-NEXT: pcmpgtd %xmm5, %xmm1
; SSE2OR3-NEXT: pand %xmm2, %xmm1
; SSE2OR3-NEXT: retq
diff --git a/llvm/test/CodeGen/X86/vector-bo-select.ll b/llvm/test/CodeGen/X86/vector-bo-select.ll
index 74549f95a262..a3ba1601c5f3 100644
--- a/llvm/test/CodeGen/X86/vector-bo-select.ll
+++ b/llvm/test/CodeGen/X86/vector-bo-select.ll
@@ -507,51 +507,11 @@ define <16 x float> @fdiv_v16f32_commute_swap(<16 x i1> %b, <16 x float> noundef
define <8 x float> @fadd_v8f32_cast_cond(i8 noundef zeroext %pb, <8 x float> noundef %x, <8 x float> noundef %y) {
; AVX2-LABEL: fadd_v8f32_cast_cond:
; AVX2: # %bb.0:
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $5, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrb $4, %cl
-; AVX2-NEXT: movzbl %cl, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: negl %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $6, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $7, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT: shrb $3, %dil
-; AVX2-NEXT: movzbl %dil, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm3 = [-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0,-0.0E+0]
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vaddps %ymm1, %ymm0, %ymm0
@@ -644,52 +604,13 @@ define <8 x double> @fadd_v8f64_cast_cond(i8 noundef zeroext %pb, <8 x double> n
define <8 x float> @fsub_v8f32_cast_cond(i8 noundef zeroext %pb, <8 x float> noundef %x, <8 x float> noundef %y) {
; AVX2-LABEL: fsub_v8f32_cast_cond:
; AVX2: # %bb.0:
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $5, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrb $4, %cl
-; AVX2-NEXT: movzbl %cl, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: negl %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $6, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $7, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT: shrb $3, %dil
-; AVX2-NEXT: movzbl %dil, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
-; AVX2-NEXT: vpand %ymm1, %ymm2, %ymm1
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vsubps %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
@@ -780,51 +701,11 @@ define <8 x double> @fsub_v8f64_cast_cond(i8 noundef zeroext %pb, <8 x double> n
define <8 x float> @fmul_v8f32_cast_cond(i8 noundef zeroext %pb, <8 x float> noundef %x, <8 x float> noundef %y) {
; AVX2-LABEL: fmul_v8f32_cast_cond:
; AVX2: # %bb.0:
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $5, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrb $4, %cl
-; AVX2-NEXT: movzbl %cl, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: negl %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $6, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $7, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT: shrb $3, %dil
-; AVX2-NEXT: movzbl %dil, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vmulps %ymm1, %ymm0, %ymm0
@@ -917,51 +798,11 @@ define <8 x double> @fmul_v8f64_cast_cond(i8 noundef zeroext %pb, <8 x double> n
define <8 x float> @fdiv_v8f32_cast_cond(i8 noundef zeroext %pb, <8 x float> noundef %x, <8 x float> noundef %y) {
; AVX2-LABEL: fdiv_v8f32_cast_cond:
; AVX2: # %bb.0:
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $5, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: movl %edi, %ecx
-; AVX2-NEXT: shrb $4, %cl
-; AVX2-NEXT: movzbl %cl, %ecx
-; AVX2-NEXT: andl $1, %ecx
-; AVX2-NEXT: negl %ecx
-; AVX2-NEXT: vmovd %ecx, %xmm2
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $6, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $7, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm2, %xmm2
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vmovd %eax, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $1, %eax, %xmm3, %xmm3
-; AVX2-NEXT: movl %edi, %eax
-; AVX2-NEXT: shrb $2, %al
-; AVX2-NEXT: movzbl %al, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $2, %eax, %xmm3, %xmm3
-; AVX2-NEXT: shrb $3, %dil
-; AVX2-NEXT: movzbl %dil, %eax
-; AVX2-NEXT: andl $1, %eax
-; AVX2-NEXT: negl %eax
-; AVX2-NEXT: vpinsrd $3, %eax, %xmm3, %xmm3
-; AVX2-NEXT: vinserti128 $1, %xmm2, %ymm3, %ymm2
+; AVX2-NEXT: vmovd %edi, %xmm2
+; AVX2-NEXT: vpbroadcastb %xmm2, %ymm2
+; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [1,2,4,8,16,32,64,128]
+; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
+; AVX2-NEXT: vpcmpeqd %ymm3, %ymm2, %ymm2
; AVX2-NEXT: vbroadcastss {{.*#+}} ymm3 = [1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0,1.0E+0]
; AVX2-NEXT: vblendvps %ymm2, %ymm1, %ymm3, %ymm1
; AVX2-NEXT: vdivps %ymm1, %ymm0, %ymm0
diff --git a/llvm/test/CodeGen/X86/vector-fshl-128.ll b/llvm/test/CodeGen/X86/vector-fshl-128.ll
index 35032331a943..19e8e73ae943 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-128.ll
@@ -1060,31 +1060,17 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %
}
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: psllq %xmm2, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: psllq %xmm2, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: psllq %xmm2, %xmm3
-; SSE41-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE41-NEXT: psllq %xmm2, %xmm1
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: psllq %xmm2, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: psllq %xmm2, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm3[1,3]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v4i32:
; AVX: # %bb.0:
@@ -1166,9 +1152,7 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm2
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: psllq %xmm2, %xmm3
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE2-NEXT: psllq %xmm2, %xmm1
@@ -1312,35 +1296,19 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %
}
define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psllw %xmm2, %xmm3
-; SSE2-NEXT: psrlw $8, %xmm3
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: psllw %xmm2, %xmm1
-; SSE2-NEXT: psrlw $8, %xmm1
-; SSE2-NEXT: packuswb %xmm3, %xmm1
-; SSE2-NEXT: movdqa %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: psllw %xmm2, %xmm3
-; SSE41-NEXT: psrlw $8, %xmm3
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE41-NEXT: psllw %xmm2, %xmm1
-; SSE41-NEXT: psrlw $8, %xmm1
-; SSE41-NEXT: packuswb %xmm3, %xmm1
-; SSE41-NEXT: movdqa %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: psllw %xmm2, %xmm3
+; SSE-NEXT: psrlw $8, %xmm3
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: psllw %xmm2, %xmm1
+; SSE-NEXT: psrlw $8, %xmm1
+; SSE-NEXT: packuswb %xmm3, %xmm1
+; SSE-NEXT: movdqa %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v16i8:
; AVX: # %bb.0:
@@ -1441,8 +1409,6 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm0[8],xmm3[9],xmm0[9],xmm3[10],xmm0[10],xmm3[11],xmm0[11],xmm3[12],xmm0[12],xmm3[13],xmm0[13],xmm3[14],xmm0[14],xmm3[15],xmm0[15]
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psllw %xmm2, %xmm3
; X86-SSE2-NEXT: psrlw $8, %xmm3
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
@@ -1460,47 +1426,25 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %
; PR37426 - https://bugs.llvm.org/show_bug.cgi?id=37426
define void @sink_splatvar(i32* %p, i32 %shift_amt) {
-; SSE2-LABEL: sink_splatvar:
-; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd %esi, %xmm0
-; SSE2-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE2-NEXT: movd %xmm0, %ecx
-; SSE2-NEXT: andl $31, %ecx
-; SSE2-NEXT: movd %ecx, %xmm0
-; SSE2-NEXT: .p2align 4, 0x90
-; SSE2-NEXT: .LBB8_1: # %loop
-; SSE2-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE2-NEXT: movdqu 1024(%rdi,%rax), %xmm1
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSE2-NEXT: psllq %xmm0, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; SSE2-NEXT: psllq %xmm0, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
-; SSE2-NEXT: movups %xmm1, 1024(%rdi,%rax)
-; SSE2-NEXT: addq $16, %rax
-; SSE2-NEXT: jne .LBB8_1
-; SSE2-NEXT: # %bb.2: # %end
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: sink_splatvar:
-; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movd %esi, %xmm0
-; SSE41-NEXT: movq $-1024, %rax # imm = 0xFC00
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
-; SSE41-NEXT: .p2align 4, 0x90
-; SSE41-NEXT: .LBB8_1: # %loop
-; SSE41-NEXT: # =>This Inner Loop Header: Depth=1
-; SSE41-NEXT: movdqu 1024(%rdi,%rax), %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
-; SSE41-NEXT: psllq %xmm0, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
-; SSE41-NEXT: psllq %xmm0, %xmm1
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
-; SSE41-NEXT: movups %xmm1, 1024(%rdi,%rax)
-; SSE41-NEXT: addq $16, %rax
-; SSE41-NEXT: jne .LBB8_1
-; SSE41-NEXT: # %bb.2: # %end
-; SSE41-NEXT: retq
+; SSE-LABEL: sink_splatvar:
+; SSE: # %bb.0: # %entry
+; SSE-NEXT: movd %esi, %xmm0
+; SSE-NEXT: movq $-1024, %rax # imm = 0xFC00
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0
+; SSE-NEXT: .p2align 4, 0x90
+; SSE-NEXT: .LBB8_1: # %loop
+; SSE-NEXT: # =>This Inner Loop Header: Depth=1
+; SSE-NEXT: movdqu 1024(%rdi,%rax), %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,2,3,3]
+; SSE-NEXT: psllq %xmm0, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,0,1,1]
+; SSE-NEXT: psllq %xmm0, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,3],xmm2[1,3]
+; SSE-NEXT: movups %xmm1, 1024(%rdi,%rax)
+; SSE-NEXT: addq $16, %rax
+; SSE-NEXT: jne .LBB8_1
+; SSE-NEXT: # %bb.2: # %end
+; SSE-NEXT: retq
;
; AVX1-LABEL: sink_splatvar:
; AVX1: # %bb.0: # %entry
@@ -1678,9 +1622,7 @@ define void @sink_splatvar(i32* %p, i32 %shift_amt) {
; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-SSE2-NEXT: xorl %ecx, %ecx
; X86-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; X86-SSE2-NEXT: movd %xmm0, %edx
-; X86-SSE2-NEXT: andl $31, %edx
-; X86-SSE2-NEXT: movd %edx, %xmm0
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm0
; X86-SSE2-NEXT: xorl %edx, %edx
; X86-SSE2-NEXT: .p2align 4, 0x90
; X86-SSE2-NEXT: .LBB8_1: # %loop
diff --git a/llvm/test/CodeGen/X86/vector-fshl-256.ll b/llvm/test/CodeGen/X86/vector-fshl-256.ll
index 91289522ff0a..36882e35cfe5 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-256.ll
@@ -313,126 +313,82 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
; AVX1-LABEL: var_funnnel_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandnps %ymm8, %ymm2, %ymm4
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
-; AVX1-NEXT: vpsllw $12, %xmm5, %xmm6
-; AVX1-NEXT: vpsllw $4, %xmm5, %xmm5
-; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpsrlw $9, %xmm7, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm7, %xmm7
-; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $12, %xmm4, %xmm5
-; AVX1-NEXT: vpsllw $4, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm5
-; AVX1-NEXT: vpsrlw $9, %xmm1, %xmm6
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm4
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vandps %ymm2, %ymm8, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm3[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
-; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm4
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm4[4],xmm3[4],xmm4[5],xmm3[5],xmm4[6],xmm3[6],xmm4[7],xmm3[7]
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm6[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $23, %xmm7, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm7, %xmm8, %xmm7
+; AVX1-NEXT: vcvttps2dq %xmm7, %xmm7
+; AVX1-NEXT: vpmulld %xmm7, %xmm5, %xmm5
+; AVX1-NEXT: vpsrld $16, %xmm5, %xmm5
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm3 = xmm4[0],xmm3[0],xmm4[1],xmm3[1],xmm4[2],xmm3[2],xmm4[3],xmm3[3]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm4 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpaddd %xmm4, %xmm8, %xmm4
; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
-; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
-; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
-; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpmulld %xmm4, %xmm3, %xmm3
+; AVX1-NEXT: vpsrld $16, %xmm3, %xmm3
+; AVX1-NEXT: vpackusdw %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm2[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm5, %xmm8, %xmm5
+; AVX1-NEXT: vcvttps2dq %xmm5, %xmm5
+; AVX1-NEXT: vpmulld %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vpsrld $16, %xmm4, %xmm4
+; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm0 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
+; AVX1-NEXT: vpslld $23, %xmm1, %xmm1
+; AVX1-NEXT: vpaddd %xmm1, %xmm8, %xmm1
+; AVX1-NEXT: vcvttps2dq %xmm1, %xmm1
+; AVX1-NEXT: vpmulld %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vpsrld $16, %xmm0, %xmm0
+; AVX1-NEXT: vpackusdw %xmm4, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_funnnel_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpandn %ymm3, %ymm2, %ymm4
-; AVX2-NEXT: vpxor %xmm5, %xmm5, %xmm5
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm6 = ymm4[4],ymm5[4],ymm4[5],ymm5[5],ymm4[6],ymm5[6],ymm4[7],ymm5[7],ymm4[12],ymm5[12],ymm4[13],ymm5[13],ymm4[14],ymm5[14],ymm4[15],ymm5[15]
-; AVX2-NEXT: vpsrlw $1, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm7 = ymm5[4],ymm1[4],ymm5[5],ymm1[5],ymm5[6],ymm1[6],ymm5[7],ymm1[7],ymm5[12],ymm1[12],ymm5[13],ymm1[13],ymm5[14],ymm1[14],ymm5[15],ymm1[15]
-; AVX2-NEXT: vpsrlvd %ymm6, %ymm7, %ymm6
-; AVX2-NEXT: vpsrld $16, %ymm6, %ymm6
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm4 = ymm4[0],ymm5[0],ymm4[1],ymm5[1],ymm4[2],ymm5[2],ymm4[3],ymm5[3],ymm4[8],ymm5[8],ymm4[9],ymm5[9],ymm4[10],ymm5[10],ymm4[11],ymm5[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm5[0],ymm1[0],ymm5[1],ymm1[1],ymm5[2],ymm1[2],ymm5[3],ymm1[3],ymm5[8],ymm1[8],ymm5[9],ymm1[9],ymm5[10],ymm1[10],ymm5[11],ymm1[11]
-; AVX2-NEXT: vpsrlvd %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpackusdw %ymm6, %ymm1, %ymm1
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm5[4],ymm0[4],ymm5[5],ymm0[5],ymm5[6],ymm0[6],ymm5[7],ymm0[7],ymm5[12],ymm0[12],ymm5[13],ymm0[13],ymm5[14],ymm0[14],ymm5[15],ymm0[15]
-; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm2[4],ymm5[4],ymm2[5],ymm5[5],ymm2[6],ymm5[6],ymm2[7],ymm5[7],ymm2[12],ymm5[12],ymm2[13],ymm5[13],ymm2[14],ymm5[14],ymm2[15],ymm5[15]
-; AVX2-NEXT: vpsllvd %ymm3, %ymm4, %ymm3
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX2-NEXT: vpsllvd %ymm5, %ymm3, %ymm3
; AVX2-NEXT: vpsrld $16, %ymm3, %ymm3
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm5[0],ymm0[0],ymm5[1],ymm0[1],ymm5[2],ymm0[2],ymm5[3],ymm0[3],ymm5[8],ymm0[8],ymm5[9],ymm0[9],ymm5[10],ymm0[10],ymm5[11],ymm0[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm5[0],ymm2[1],ymm5[1],ymm2[2],ymm5[2],ymm2[3],ymm5[3],ymm2[8],ymm5[8],ymm2[9],ymm5[9],ymm2[10],ymm5[10],ymm2[11],ymm5[11]
-; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX2-NEXT: vpsllvd %ymm1, %ymm0, %ymm0
; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_funnnel_v16i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm4
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
-; AVX512F-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpslld $16, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_funnnel_v16i16:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpandn %ymm3, %ymm2, %ymm4
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
-; AVX512VL-NEXT: vpsrlw $1, %ymm1, %ymm1
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512VL-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpslld $16, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpsllvd %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpsrld $16, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: retq
;
@@ -475,25 +431,22 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
;
; XOPAVX1-LABEL: var_funnnel_v16i16:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vmovaps {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; XOPAVX1-NEXT: vandps %ymm3, %ymm2, %ymm4
-; XOPAVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; XOPAVX1-NEXT: vpshlw %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT: vpshlw %xmm4, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; XOPAVX1-NEXT: vandnps %ymm3, %ymm2, %ymm2
-; XOPAVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; XOPAVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; XOPAVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
-; XOPAVX1-NEXT: vpshlw %xmm3, %xmm5, %xmm3
-; XOPAVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
+; XOPAVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; XOPAVX1-NEXT: vextractf128 $1, %ymm2, %xmm4
+; XOPAVX1-NEXT: vpshlw %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [65521,65521,65521,65521,65521,65521,65521,65521]
+; XOPAVX1-NEXT: vpaddw %xmm5, %xmm4, %xmm4
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT: vpsrlw $1, %xmm6, %xmm6
+; XOPAVX1-NEXT: vpshlw %xmm4, %xmm6, %xmm4
+; XOPAVX1-NEXT: vpor %xmm4, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpaddw %xmm5, %xmm2, %xmm2
; XOPAVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
; XOPAVX1-NEXT: vpshlw %xmm2, %xmm1, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_funnnel_v16i16:
@@ -931,9 +884,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX2-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -943,9 +894,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512F-LABEL: splatvar_funnnel_v8i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512F-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; AVX512F-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512F-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -955,8 +904,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VL-LABEL: splatvar_funnnel_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512VL-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -966,9 +914,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512BW-LABEL: splatvar_funnnel_v8i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512BW-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; AVX512BW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512BW-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -987,8 +933,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; AVX512VLBW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512VLBW-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -1022,9 +967,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; XOPAVX2-NEXT: vpand %xmm4, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpsllq %xmm2, %ymm3, %ymm3
; XOPAVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; XOPAVX2-NEXT: vpsllq %xmm2, %ymm0, %ymm0
@@ -1038,22 +981,20 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
; AVX1-LABEL: splatvar_funnnel_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
; AVX1-NEXT: vpsrlw %xmm4, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v16i16:
@@ -1138,22 +1079,20 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
;
; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
; XOPAVX1-NEXT: vpandn %xmm3, %xmm2, %xmm4
-; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; XOPAVX1-NEXT: vpsrlw $1, %xmm5, %xmm5
; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm5, %xmm5
-; XOPAVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
-; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsrlw $1, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
@@ -1200,7 +1139,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX2-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1213,7 +1151,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1226,7 +1163,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1239,7 +1175,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512BW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1252,7 +1187,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512VBMI2-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1265,7 +1199,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512VLBW-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1278,7 +1211,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm2, %ymm3, %ymm3
; AVX512VLVBMI2-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
@@ -1339,8 +1271,7 @@ define void @fancierRotate2(i32* %arr, i8* %control, i32 %rot0, i32 %rot1) {
; AVX1-NEXT: vmovd %ecx, %xmm2
; AVX1-NEXT: movq $-1024, %rax # imm = 0xFC00
; AVX1-NEXT: vpxor %xmm8, %xmm8, %xmm8
-; AVX1-NEXT: vmovddup {{.*#+}} xmm3 = [31,31]
-; AVX1-NEXT: # xmm3 = mem[0,0]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [31,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm1, %xmm1
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm2
; AVX1-NEXT: .p2align 4, 0x90
diff --git a/llvm/test/CodeGen/X86/vector-fshl-512.ll b/llvm/test/CodeGen/X86/vector-fshl-512.ll
index dd158f9603bf..cc9aedf1bd74 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-512.ll
@@ -502,9 +502,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512F-LABEL: splatvar_funnnel_v16i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512F-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpsllq %xmm2, %zmm3, %zmm3
; AVX512F-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512F-NEXT: vpsllq %xmm2, %zmm0, %zmm0
@@ -514,8 +512,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VL-LABEL: splatvar_funnnel_v16i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpsllq %xmm2, %zmm3, %zmm3
; AVX512VL-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512VL-NEXT: vpsllq %xmm2, %zmm0, %zmm0
@@ -525,9 +522,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512BW-LABEL: splatvar_funnnel_v16i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512BW-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsllq %xmm2, %zmm3, %zmm3
; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512BW-NEXT: vpsllq %xmm2, %zmm0, %zmm0
@@ -543,8 +538,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VLBW-LABEL: splatvar_funnnel_v16i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsllq %xmm2, %zmm3, %zmm3
; AVX512VLBW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512VLBW-NEXT: vpsllq %xmm2, %zmm0, %zmm0
@@ -651,7 +645,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm2, %ymm5, %ymm5
; AVX512F-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
@@ -674,7 +667,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm2, %ymm5, %ymm5
; AVX512VL-NEXT: vpsrlw $8, %ymm5, %ymm5
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm3 = ymm4[0],ymm3[0],ymm4[1],ymm3[1],ymm4[2],ymm3[2],ymm4[3],ymm3[3],ymm4[4],ymm3[4],ymm4[5],ymm3[5],ymm4[6],ymm3[6],ymm4[7],ymm3[7],ymm4[16],ymm3[16],ymm4[17],ymm3[17],ymm4[18],ymm3[18],ymm4[19],ymm3[19],ymm4[20],ymm3[20],ymm4[21],ymm3[21],ymm4[22],ymm3[22],ymm4[23],ymm3[23]
@@ -695,7 +687,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm2, %zmm3, %zmm3
; AVX512BW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
@@ -708,7 +699,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm2, %zmm3, %zmm3
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
@@ -721,7 +711,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm2, %zmm3, %zmm3
; AVX512VLBW-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
@@ -734,7 +723,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm2, %zmm3, %zmm3
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm3, %zmm3
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
index 57abe2333f1c..16013c3cd221 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-128.ll
@@ -827,27 +827,15 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind
}
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: psllq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT: psllq %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE41-NEXT: psllq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE41-NEXT: psllq %xmm1, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE-NEXT: psllq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE-NEXT: psllq %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v4i32:
; AVX: # %bb.0:
@@ -918,10 +906,8 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: psllq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-SSE2-NEXT: psllq %xmm1, %xmm0
@@ -935,11 +921,9 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind
define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v8i16:
; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslld %xmm1, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1051,11 +1035,9 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v8i16:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: pslld %xmm1, %xmm2
; X86-SSE2-NEXT: psrad $16, %xmm2
; X86-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1069,33 +1051,18 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
}
define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psllw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psllw %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: psllw %xmm1, %xmm2
-; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: psllw %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT: psllw %xmm1, %xmm2
+; SSE-NEXT: psrlw $8, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: psllw %xmm1, %xmm0
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v16i8:
; AVX: # %bb.0:
@@ -1196,11 +1163,9 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v16i8:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psllw %xmm1, %xmm2
; X86-SSE2-NEXT: psrlw $8, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
index 50dc71ceb7e2..20660c78ce4f 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-256.ll
@@ -681,10 +681,8 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpsllq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
@@ -896,7 +894,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -909,7 +906,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512F-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -922,7 +918,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512VL-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -935,7 +930,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512BW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -948,7 +942,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512VLBW-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -961,7 +954,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512VBMI2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -974,7 +966,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
index 683a4bc5c63c..799abb799b7f 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-512.ll
@@ -371,10 +371,9 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm1, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -393,10 +392,9 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -417,7 +415,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -430,7 +427,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -443,7 +439,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -456,7 +451,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
diff --git a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
index 29033a96642a..da4de476774a 100644
--- a/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshl-rot-sub128.ll
@@ -162,10 +162,8 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psllq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: psllq %xmm1, %xmm0
@@ -261,10 +259,8 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: psllq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-SSE2-NEXT: psllq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-fshr-128.ll b/llvm/test/CodeGen/X86/vector-fshr-128.ll
index de554c7fe72e..40d5b3929c34 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-128.ll
@@ -1150,31 +1150,17 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %
}
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm3
-; SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE2-NEXT: movd %xmm2, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm2
-; SSE2-NEXT: psrlq %xmm2, %xmm3
-; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE2-NEXT: psrlq %xmm2, %xmm1
-; SSE2-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; SSE2-NEXT: movaps %xmm1, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm3
-; SSE41-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: psrlq %xmm2, %xmm3
-; SSE41-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
-; SSE41-NEXT: psrlq %xmm2, %xmm1
-; SSE41-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
-; SSE41-NEXT: movaps %xmm1, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: psrlq %xmm2, %xmm3
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
+; SSE-NEXT: psrlq %xmm2, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,2],xmm3[0,2]
+; SSE-NEXT: movaps %xmm1, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v4i32:
; AVX: # %bb.0:
@@ -1259,9 +1245,7 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %
; X86-SSE2: # %bb.0:
; X86-SSE2-NEXT: movdqa %xmm1, %xmm3
; X86-SSE2-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm0[2],xmm3[3],xmm0[3]
-; X86-SSE2-NEXT: movd %xmm2, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm2
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
; X86-SSE2-NEXT: psrlq %xmm2, %xmm3
; X86-SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; X86-SSE2-NEXT: psrlq %xmm2, %xmm1
@@ -1406,37 +1390,20 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %
}
define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm1, %xmm4
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm2, %xmm4
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm4
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE2-NEXT: psrlw %xmm2, %xmm1
-; SSE2-NEXT: pand %xmm1, %xmm3
-; SSE2-NEXT: packuswb %xmm4, %xmm3
-; SSE2-NEXT: movdqa %xmm3, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: movdqa %xmm1, %xmm4
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
-; SSE41-NEXT: psrlw %xmm2, %xmm4
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm3, %xmm4
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
-; SSE41-NEXT: psrlw %xmm2, %xmm1
-; SSE41-NEXT: pand %xmm1, %xmm3
-; SSE41-NEXT: packuswb %xmm4, %xmm3
-; SSE41-NEXT: movdqa %xmm3, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2
+; SSE-NEXT: psrlw %xmm2, %xmm4
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm3, %xmm4
+; SSE-NEXT: punpcklbw {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3],xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
+; SSE-NEXT: psrlw %xmm2, %xmm1
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: packuswb %xmm4, %xmm3
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v16i8:
; AVX: # %bb.0:
@@ -1540,8 +1507,6 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %
; X86-SSE2-NEXT: movdqa %xmm1, %xmm4
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm0[8],xmm4[9],xmm0[9],xmm4[10],xmm0[10],xmm4[11],xmm0[11],xmm4[12],xmm0[12],xmm4[13],xmm0[13],xmm4[14],xmm0[14],xmm4[15],xmm0[15]
; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm2
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm2 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm2[0]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm2 = xmm2[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psrlw %xmm2, %xmm4
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm4
diff --git a/llvm/test/CodeGen/X86/vector-fshr-256.ll b/llvm/test/CodeGen/X86/vector-fshr-256.ll
index 09d01ebafed5..12eede8976c9 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-256.ll
@@ -318,126 +318,105 @@ define <8 x i32> @var_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %amt)
define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
; AVX1-LABEL: var_funnnel_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovaps {{.*#+}} ymm8 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX1-NEXT: vandps %ymm2, %ymm8, %ymm4
-; AVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
-; AVX1-NEXT: vpsllw $12, %xmm5, %xmm6
-; AVX1-NEXT: vpsllw $4, %xmm5, %xmm5
-; AVX1-NEXT: vpor %xmm6, %xmm5, %xmm5
-; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm6
-; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm7
-; AVX1-NEXT: vpsrlw $8, %xmm7, %xmm3
-; AVX1-NEXT: vpblendvb %xmm5, %xmm3, %xmm7, %xmm3
-; AVX1-NEXT: vpsrlw $4, %xmm3, %xmm5
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $2, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsrlw $1, %xmm3, %xmm5
-; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
-; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vpsllw $12, %xmm4, %xmm5
-; AVX1-NEXT: vpsllw $4, %xmm4, %xmm4
-; AVX1-NEXT: vpor %xmm5, %xmm4, %xmm4
+; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
+; AVX1-NEXT: vpsllw $12, %xmm3, %xmm4
+; AVX1-NEXT: vpsllw $4, %xmm3, %xmm5
+; AVX1-NEXT: vpor %xmm4, %xmm5, %xmm4
; AVX1-NEXT: vpaddw %xmm4, %xmm4, %xmm5
-; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm6
-; AVX1-NEXT: vpblendvb %xmm4, %xmm6, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm4
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm4
+; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; AVX1-NEXT: vpsrlw $8, %xmm6, %xmm7
+; AVX1-NEXT: vpblendvb %xmm4, %xmm7, %xmm6, %xmm4
+; AVX1-NEXT: vpsrlw $4, %xmm4, %xmm6
+; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $2, %xmm4, %xmm6
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm4
+; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vpsrlw $1, %xmm4, %xmm6
; AVX1-NEXT: vpaddw %xmm5, %xmm5, %xmm5
-; AVX1-NEXT: vpblendvb %xmm5, %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; AVX1-NEXT: vandnps %ymm8, %ymm2, %ymm2
-; AVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm3[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm5 = [1065353216,1065353216,1065353216,1065353216]
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
-; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero
-; AVX1-NEXT: vpslld $23, %xmm3, %xmm3
-; AVX1-NEXT: vpaddd %xmm5, %xmm3, %xmm3
-; AVX1-NEXT: vcvttps2dq %xmm3, %xmm3
-; AVX1-NEXT: vpackusdw %xmm4, %xmm3, %xmm3
-; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm4
-; AVX1-NEXT: vpsllw $1, %xmm4, %xmm4
-; AVX1-NEXT: vpmullw %xmm3, %xmm4, %xmm3
-; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm4 = xmm2[4,4,5,5,6,6,7,7]
-; AVX1-NEXT: vpslld $23, %xmm4, %xmm4
-; AVX1-NEXT: vpaddd %xmm5, %xmm4, %xmm4
-; AVX1-NEXT: vcvttps2dq %xmm4, %xmm4
+; AVX1-NEXT: vpblendvb %xmm5, %xmm6, %xmm4, %xmm4
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm8 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vpxor %xmm3, %xmm8, %xmm6
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm3 = xmm6[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $23, %xmm3, %xmm7
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [1065353216,1065353216,1065353216,1065353216]
+; AVX1-NEXT: vpaddd %xmm3, %xmm7, %xmm7
+; AVX1-NEXT: vcvttps2dq %xmm7, %xmm7
+; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero,xmm6[2],zero,xmm6[3],zero
+; AVX1-NEXT: vpslld $23, %xmm6, %xmm6
+; AVX1-NEXT: vpaddd %xmm3, %xmm6, %xmm6
+; AVX1-NEXT: vcvttps2dq %xmm6, %xmm6
+; AVX1-NEXT: vpackusdw %xmm7, %xmm6, %xmm6
+; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; AVX1-NEXT: vpsllw $1, %xmm7, %xmm7
+; AVX1-NEXT: vpmullw %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpor %xmm4, %xmm6, %xmm4
+; AVX1-NEXT: vpsllw $12, %xmm2, %xmm6
+; AVX1-NEXT: vpsllw $4, %xmm2, %xmm7
+; AVX1-NEXT: vpor %xmm6, %xmm7, %xmm6
+; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm7
+; AVX1-NEXT: vpsrlw $8, %xmm1, %xmm5
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $4, %xmm1, %xmm5
+; AVX1-NEXT: vpblendvb %xmm7, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $2, %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm7, %xmm7, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpsrlw $1, %xmm1, %xmm5
+; AVX1-NEXT: vpaddw %xmm6, %xmm6, %xmm6
+; AVX1-NEXT: vpblendvb %xmm6, %xmm5, %xmm1, %xmm1
+; AVX1-NEXT: vpxor %xmm2, %xmm8, %xmm2
+; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm5 = xmm2[4,4,5,5,6,6,7,7]
+; AVX1-NEXT: vpslld $23, %xmm5, %xmm5
+; AVX1-NEXT: vpaddd %xmm3, %xmm5, %xmm5
+; AVX1-NEXT: vcvttps2dq %xmm5, %xmm5
; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero
; AVX1-NEXT: vpslld $23, %xmm2, %xmm2
-; AVX1-NEXT: vpaddd %xmm5, %xmm2, %xmm2
+; AVX1-NEXT: vpaddd %xmm3, %xmm2, %xmm2
; AVX1-NEXT: vcvttps2dq %xmm2, %xmm2
-; AVX1-NEXT: vpackusdw %xmm4, %xmm2, %xmm2
+; AVX1-NEXT: vpackusdw %xmm5, %xmm2, %xmm2
; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0
; AVX1-NEXT: vpmullw %xmm2, %xmm0, %xmm0
-; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX1-NEXT: vinsertf128 $1, %xmm4, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: var_funnnel_v16i16:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm3[4],ymm1[4],ymm3[5],ymm1[5],ymm3[6],ymm1[6],ymm3[7],ymm1[7],ymm3[12],ymm1[12],ymm3[13],ymm1[13],ymm3[14],ymm1[14],ymm3[15],ymm1[15]
-; AVX2-NEXT: vmovdqa {{.*#+}} ymm5 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX2-NEXT: vpand %ymm5, %ymm2, %ymm6
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm7 = ymm6[4],ymm3[4],ymm6[5],ymm3[5],ymm6[6],ymm3[6],ymm6[7],ymm3[7],ymm6[12],ymm3[12],ymm6[13],ymm3[13],ymm6[14],ymm3[14],ymm6[15],ymm3[15]
-; AVX2-NEXT: vpsrlvd %ymm7, %ymm4, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm3[0],ymm1[0],ymm3[1],ymm1[1],ymm3[2],ymm1[2],ymm3[3],ymm1[3],ymm3[8],ymm1[8],ymm3[9],ymm1[9],ymm3[10],ymm1[10],ymm3[11],ymm1[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm6 = ymm6[0],ymm3[0],ymm6[1],ymm3[1],ymm6[2],ymm3[2],ymm6[3],ymm3[3],ymm6[8],ymm3[8],ymm6[9],ymm3[9],ymm6[10],ymm3[10],ymm6[11],ymm3[11]
-; AVX2-NEXT: vpsrlvd %ymm6, %ymm1, %ymm1
-; AVX2-NEXT: vpsrld $16, %ymm1, %ymm1
-; AVX2-NEXT: vpackusdw %ymm4, %ymm1, %ymm1
-; AVX2-NEXT: vpandn %ymm5, %ymm2, %ymm2
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm4 = ymm2[4],ymm3[4],ymm2[5],ymm3[5],ymm2[6],ymm3[6],ymm2[7],ymm3[7],ymm2[12],ymm3[12],ymm2[13],ymm3[13],ymm2[14],ymm3[14],ymm2[15],ymm3[15]
-; AVX2-NEXT: vpsllw $1, %ymm0, %ymm0
-; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm3[4],ymm0[4],ymm3[5],ymm0[5],ymm3[6],ymm0[6],ymm3[7],ymm0[7],ymm3[12],ymm0[12],ymm3[13],ymm0[13],ymm3[14],ymm0[14],ymm3[15],ymm0[15]
-; AVX2-NEXT: vpsllvd %ymm4, %ymm5, %ymm4
-; AVX2-NEXT: vpsrld $16, %ymm4, %ymm4
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm2 = ymm2[0],ymm3[0],ymm2[1],ymm3[1],ymm2[2],ymm3[2],ymm2[3],ymm3[3],ymm2[8],ymm3[8],ymm2[9],ymm3[9],ymm2[10],ymm3[10],ymm2[11],ymm3[11]
-; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm3[0],ymm0[0],ymm3[1],ymm0[1],ymm3[2],ymm0[2],ymm3[3],ymm0[3],ymm3[8],ymm0[8],ymm3[9],ymm0[9],ymm3[10],ymm0[10],ymm3[11],ymm0[11]
-; AVX2-NEXT: vpsllvd %ymm2, %ymm0, %ymm0
-; AVX2-NEXT: vpsrld $16, %ymm0, %ymm0
-; AVX2-NEXT: vpackusdw %ymm4, %ymm0, %ymm0
-; AVX2-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm3 = ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
+; AVX2-NEXT: vpxor %xmm4, %xmm4, %xmm4
+; AVX2-NEXT: vpunpckhwd {{.*#+}} ymm5 = ymm2[4],ymm4[4],ymm2[5],ymm4[5],ymm2[6],ymm4[6],ymm2[7],ymm4[7],ymm2[12],ymm4[12],ymm2[13],ymm4[13],ymm2[14],ymm4[14],ymm2[15],ymm4[15]
+; AVX2-NEXT: vpsrlvd %ymm5, %ymm3, %ymm3
+; AVX2-NEXT: vpblendw {{.*#+}} ymm3 = ymm3[0],ymm4[1],ymm3[2],ymm4[3],ymm3[4],ymm4[5],ymm3[6],ymm4[7],ymm3[8],ymm4[9],ymm3[10],ymm4[11],ymm3[12],ymm4[13],ymm3[14],ymm4[15]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11]
+; AVX2-NEXT: vpunpcklwd {{.*#+}} ymm1 = ymm2[0],ymm4[0],ymm2[1],ymm4[1],ymm2[2],ymm4[2],ymm2[3],ymm4[3],ymm2[8],ymm4[8],ymm2[9],ymm4[9],ymm2[10],ymm4[10],ymm2[11],ymm4[11]
+; AVX2-NEXT: vpsrlvd %ymm1, %ymm0, %ymm0
+; AVX2-NEXT: vpblendw {{.*#+}} ymm0 = ymm0[0],ymm4[1],ymm0[2],ymm4[3],ymm0[4],ymm4[5],ymm0[6],ymm4[7],ymm0[8],ymm4[9],ymm0[10],ymm4[11],ymm0[12],ymm4[13],ymm0[14],ymm4[15]
+; AVX2-NEXT: vpackusdw %ymm3, %ymm0, %ymm0
; AVX2-NEXT: retq
;
; AVX512F-LABEL: var_funnnel_v16i16:
; AVX512F: # %bb.0:
-; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm4
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512F-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
-; AVX512F-NEXT: vpandn %ymm3, %ymm2, %ymm2
-; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512F-NEXT: vpsllw $1, %ymm0, %ymm0
; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512F-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
+; AVX512F-NEXT: vpslld $16, %zmm0, %zmm0
; AVX512F-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512F-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512F-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
; AVX512F-NEXT: retq
;
; AVX512VL-LABEL: var_funnnel_v16i16:
; AVX512VL: # %bb.0:
-; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm4
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm4 = ymm4[0],zero,ymm4[1],zero,ymm4[2],zero,ymm4[3],zero,ymm4[4],zero,ymm4[5],zero,ymm4[6],zero,ymm4[7],zero,ymm4[8],zero,ymm4[9],zero,ymm4[10],zero,ymm4[11],zero,ymm4[12],zero,ymm4[13],zero,ymm4[14],zero,ymm4[15],zero
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
-; AVX512VL-NEXT: vpsrlvd %zmm4, %zmm1, %zmm1
-; AVX512VL-NEXT: vpandn %ymm3, %ymm2, %ymm2
-; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm2 = ymm2[0],zero,ymm2[1],zero,ymm2[2],zero,ymm2[3],zero,ymm2[4],zero,ymm2[5],zero,ymm2[6],zero,ymm2[7],zero,ymm2[8],zero,ymm2[9],zero,ymm2[10],zero,ymm2[11],zero,ymm2[12],zero,ymm2[13],zero,ymm2[14],zero,ymm2[15],zero
-; AVX512VL-NEXT: vpsllw $1, %ymm0, %ymm0
; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm0 = ymm0[0],zero,ymm0[1],zero,ymm0[2],zero,ymm0[3],zero,ymm0[4],zero,ymm0[5],zero,ymm0[6],zero,ymm0[7],zero,ymm0[8],zero,ymm0[9],zero,ymm0[10],zero,ymm0[11],zero,ymm0[12],zero,ymm0[13],zero,ymm0[14],zero,ymm0[15],zero
-; AVX512VL-NEXT: vpsllvd %zmm2, %zmm0, %zmm0
+; AVX512VL-NEXT: vpslld $16, %zmm0, %zmm0
; AVX512VL-NEXT: vpord %zmm1, %zmm0, %zmm0
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm1
+; AVX512VL-NEXT: vpmovzxwd {{.*#+}} zmm1 = ymm1[0],zero,ymm1[1],zero,ymm1[2],zero,ymm1[3],zero,ymm1[4],zero,ymm1[5],zero,ymm1[6],zero,ymm1[7],zero,ymm1[8],zero,ymm1[9],zero,ymm1[10],zero,ymm1[11],zero,ymm1[12],zero,ymm1[13],zero,ymm1[14],zero,ymm1[15],zero
+; AVX512VL-NEXT: vpsrlvd %zmm1, %zmm0, %zmm0
; AVX512VL-NEXT: vpmovdw %zmm0, %ymm0
; AVX512VL-NEXT: retq
;
@@ -481,25 +460,25 @@ define <16 x i16> @var_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
;
; XOPAVX1-LABEL: var_funnnel_v16i16:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vmovaps {{.*#+}} ymm3 = [15,15,15,15,15,15,15,15,15,15,15,15,15,15,15,15]
-; XOPAVX1-NEXT: vandnps %ymm3, %ymm2, %ymm4
-; XOPAVX1-NEXT: vextractf128 $1, %ymm4, %xmm5
-; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm6
-; XOPAVX1-NEXT: vpsllw $1, %xmm6, %xmm6
-; XOPAVX1-NEXT: vpshlw %xmm5, %xmm6, %xmm5
-; XOPAVX1-NEXT: vpsllw $1, %xmm0, %xmm0
-; XOPAVX1-NEXT: vpshlw %xmm4, %xmm0, %xmm0
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm0, %ymm0
-; XOPAVX1-NEXT: vandps %ymm3, %ymm2, %ymm2
+; XOPAVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm2, %ymm2
; XOPAVX1-NEXT: vextractf128 $1, %ymm2, %xmm3
; XOPAVX1-NEXT: vpxor %xmm4, %xmm4, %xmm4
-; XOPAVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm3
-; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
-; XOPAVX1-NEXT: vpshlw %xmm3, %xmm5, %xmm3
-; XOPAVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm2
-; XOPAVX1-NEXT: vpshlw %xmm2, %xmm1, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm1, %ymm1
-; XOPAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
+; XOPAVX1-NEXT: vpsubw %xmm3, %xmm4, %xmm5
+; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm6
+; XOPAVX1-NEXT: vpshlw %xmm5, %xmm6, %xmm5
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm6 = [15,15,15,15,15,15,15,15]
+; XOPAVX1-NEXT: vpxor %xmm6, %xmm3, %xmm3
+; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm7
+; XOPAVX1-NEXT: vpsllw $1, %xmm7, %xmm7
+; XOPAVX1-NEXT: vpshlw %xmm3, %xmm7, %xmm3
+; XOPAVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsubw %xmm2, %xmm4, %xmm4
+; XOPAVX1-NEXT: vpshlw %xmm4, %xmm1, %xmm1
+; XOPAVX1-NEXT: vpxor %xmm6, %xmm2, %xmm2
+; XOPAVX1-NEXT: vpsllw $1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpshlw %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
+; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: var_funnnel_v16i16:
@@ -937,9 +916,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX2-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; AVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX2-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -949,9 +926,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512F-LABEL: splatvar_funnnel_v8i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512F-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; AVX512F-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512F-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -961,8 +936,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VL-LABEL: splatvar_funnnel_v8i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512VL-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -972,9 +946,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512BW-LABEL: splatvar_funnnel_v8i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512BW-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; AVX512BW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512BW-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -993,8 +965,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; AVX512VLBW-LABEL: splatvar_funnnel_v8i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; AVX512VLBW-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; AVX512VLBW-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -1029,9 +1000,7 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
; XOPAVX2-LABEL: splatvar_funnnel_v8i32:
; XOPAVX2: # %bb.0:
; XOPAVX2-NEXT: vpunpckhdq {{.*#+}} ymm3 = ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[6],ymm0[6],ymm1[7],ymm0[7]
-; XOPAVX2-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; XOPAVX2-NEXT: vpand %xmm4, %xmm2, %xmm2
-; XOPAVX2-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; XOPAVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; XOPAVX2-NEXT: vpsrlq %xmm2, %ymm3, %ymm3
; XOPAVX2-NEXT: vpunpckldq {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[4],ymm0[4],ymm1[5],ymm0[5]
; XOPAVX2-NEXT: vpsrlq %xmm2, %ymm0, %ymm0
@@ -1045,22 +1014,20 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %
define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %amt) nounwind {
; AVX1-LABEL: splatvar_funnnel_v16i16:
; AVX1: # %bb.0:
-; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; AVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
; AVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; AVX1-NEXT: vpsrlw %xmm4, %xmm5, %xmm5
-; AVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
-; AVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; AVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2
-; AVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; AVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; AVX1-NEXT: vpsllw $1, %xmm3, %xmm3
; AVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
+; AVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
+; AVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
; AVX1-NEXT: vpsllw $1, %xmm0, %xmm0
; AVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm0
+; AVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; AVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; AVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; AVX1-NEXT: retq
;
; AVX2-LABEL: splatvar_funnnel_v16i16:
@@ -1146,22 +1113,20 @@ define <16 x i16> @splatvar_funnnel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i
;
; XOPAVX1-LABEL: splatvar_funnnel_v16i16:
; XOPAVX1: # %bb.0:
-; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,15,15,15,15,15,15,15]
+; XOPAVX1-NEXT: vmovdqa {{.*#+}} xmm3 = [15,0,0,0]
; XOPAVX1-NEXT: vpand %xmm3, %xmm2, %xmm4
-; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm4 = xmm4[0],zero,zero,zero,xmm4[1],zero,zero,zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm1, %xmm5
; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm5, %xmm5
-; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
-; XOPAVX1-NEXT: vinsertf128 $1, %xmm5, %ymm1, %ymm1
; XOPAVX1-NEXT: vpandn %xmm3, %xmm2, %xmm2
-; XOPAVX1-NEXT: vpmovzxwq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,xmm2[1],zero,zero,zero
; XOPAVX1-NEXT: vextractf128 $1, %ymm0, %xmm3
; XOPAVX1-NEXT: vpsllw $1, %xmm3, %xmm3
; XOPAVX1-NEXT: vpsllw %xmm2, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpor %xmm5, %xmm3, %xmm3
+; XOPAVX1-NEXT: vpsrlw %xmm4, %xmm1, %xmm1
; XOPAVX1-NEXT: vpsllw $1, %xmm0, %xmm0
; XOPAVX1-NEXT: vpsllw %xmm2, %xmm0, %xmm0
+; XOPAVX1-NEXT: vpor %xmm1, %xmm0, %xmm0
; XOPAVX1-NEXT: vinsertf128 $1, %xmm3, %ymm0, %ymm0
-; XOPAVX1-NEXT: vorps %ymm1, %ymm0, %ymm0
; XOPAVX1-NEXT: retq
;
; XOPAVX2-LABEL: splatvar_funnnel_v16i16:
@@ -1209,7 +1174,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX2-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -1223,7 +1187,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -1237,7 +1200,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -1251,7 +1213,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -1266,7 +1227,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm3 = [0,2,4,6,8,10,12,14,64,66,68,70,72,74,76,78,16,18,20,22,24,26,28,30,80,82,84,86,88,90,92,94]
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} ymm4 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm2, %ymm4, %ymm4
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
; AVX512VBMI2-NEXT: vpsrlw %xmm2, %ymm0, %ymm0
@@ -1278,7 +1238,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -1292,7 +1251,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm1[8],ymm0[8],ymm1[9],ymm0[9],ymm1[10],ymm0[10],ymm1[11],ymm0[11],ymm1[12],ymm0[12],ymm1[13],ymm0[13],ymm1[14],ymm0[14],ymm1[15],ymm0[15],ymm1[24],ymm0[24],ymm1[25],ymm0[25],ymm1[26],ymm0[26],ymm1[27],ymm0[27],ymm1[28],ymm0[28],ymm1[29],ymm0[29],ymm1[30],ymm0[30],ymm1[31],ymm0[31]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsrlw %xmm2, %ymm3, %ymm3
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm1[0],ymm0[0],ymm1[1],ymm0[1],ymm1[2],ymm0[2],ymm1[3],ymm0[3],ymm1[4],ymm0[4],ymm1[5],ymm0[5],ymm1[6],ymm0[6],ymm1[7],ymm0[7],ymm1[16],ymm0[16],ymm1[17],ymm0[17],ymm1[18],ymm0[18],ymm1[19],ymm0[19],ymm1[20],ymm0[20],ymm1[21],ymm0[21],ymm1[22],ymm0[22],ymm1[23],ymm0[23]
; AVX512VLVBMI2-NEXT: vpsrlw %xmm2, %ymm0, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-fshr-512.ll b/llvm/test/CodeGen/X86/vector-fshr-512.ll
index e8537d023f7f..64dfa95274b0 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-512.ll
@@ -506,9 +506,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512F-LABEL: splatvar_funnnel_v16i32:
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512F-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512F-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512F-NEXT: vpsrlq %xmm2, %zmm3, %zmm3
; AVX512F-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512F-NEXT: vpsrlq %xmm2, %zmm0, %zmm0
@@ -518,8 +516,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VL-LABEL: splatvar_funnnel_v16i32:
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512VL-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VL-NEXT: vpsrlq %xmm2, %zmm3, %zmm3
; AVX512VL-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512VL-NEXT: vpsrlq %xmm2, %zmm0, %zmm0
@@ -529,9 +526,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512BW-LABEL: splatvar_funnnel_v16i32:
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512BW-NEXT: vpbroadcastd {{.*#+}} xmm4 = [31,31,31,31]
-; AVX512BW-NEXT: vpand %xmm4, %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512BW-NEXT: vpsrlq %xmm2, %zmm3, %zmm3
; AVX512BW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512BW-NEXT: vpsrlq %xmm2, %zmm0, %zmm0
@@ -548,8 +543,7 @@ define <16 x i32> @splatvar_funnnel_v16i32(<16 x i32> %x, <16 x i32> %y, <16 x i
; AVX512VLBW-LABEL: splatvar_funnnel_v16i32:
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhdq {{.*#+}} zmm3 = zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[14],zmm0[14],zmm1[15],zmm0[15]
-; AVX512VLBW-NEXT: vpandd {{\.?LCPI[0-9]+_[0-9]+}}(%rip){1to4}, %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
+; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
; AVX512VLBW-NEXT: vpsrlq %xmm2, %zmm3, %zmm3
; AVX512VLBW-NEXT: vpunpckldq {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[12],zmm0[12],zmm1[13],zmm0[13]
; AVX512VLBW-NEXT: vpsrlq %xmm2, %zmm0, %zmm0
@@ -659,7 +653,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512F-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm2, %ymm5, %ymm5
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm6, %ymm5, %ymm5
@@ -683,7 +676,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VL-NEXT: vextracti64x4 $1, %zmm1, %ymm4
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm5 = ymm4[8],ymm3[8],ymm4[9],ymm3[9],ymm4[10],ymm3[10],ymm4[11],ymm3[11],ymm4[12],ymm3[12],ymm4[13],ymm3[13],ymm4[14],ymm3[14],ymm4[15],ymm3[15],ymm4[24],ymm3[24],ymm4[25],ymm3[25],ymm4[26],ymm3[26],ymm4[27],ymm3[27],ymm4[28],ymm3[28],ymm4[29],ymm3[29],ymm4[30],ymm3[30],ymm4[31],ymm3[31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm2, %ymm5, %ymm5
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm6 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm6, %ymm5, %ymm5
@@ -705,7 +697,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm2, %zmm3, %zmm3
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm4, %zmm3, %zmm3
@@ -719,7 +710,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm2, %zmm3, %zmm3
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
; AVX512VBMI2-NEXT: vpsrlw %xmm2, %zmm0, %zmm1
@@ -731,7 +721,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm2, %zmm3, %zmm3
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpandq %zmm4, %zmm3, %zmm3
@@ -745,7 +734,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %y, <64 x i8> %
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm3 = zmm1[8],zmm0[8],zmm1[9],zmm0[9],zmm1[10],zmm0[10],zmm1[11],zmm0[11],zmm1[12],zmm0[12],zmm1[13],zmm0[13],zmm1[14],zmm0[14],zmm1[15],zmm0[15],zmm1[24],zmm0[24],zmm1[25],zmm0[25],zmm1[26],zmm0[26],zmm1[27],zmm0[27],zmm1[28],zmm0[28],zmm1[29],zmm0[29],zmm1[30],zmm0[30],zmm1[31],zmm0[31],zmm1[40],zmm0[40],zmm1[41],zmm0[41],zmm1[42],zmm0[42],zmm1[43],zmm0[43],zmm1[44],zmm0[44],zmm1[45],zmm0[45],zmm1[46],zmm0[46],zmm1[47],zmm0[47],zmm1[56],zmm0[56],zmm1[57],zmm0[57],zmm1[58],zmm0[58],zmm1[59],zmm0[59],zmm1[60],zmm0[60],zmm1[61],zmm0[61],zmm1[62],zmm0[62],zmm1[63],zmm0[63]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm2, %xmm2
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm2 = xmm2[0],zero,zero,zero,zero,zero,zero,zero,xmm2[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsrlw %xmm2, %zmm3, %zmm3
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm1[0],zmm0[0],zmm1[1],zmm0[1],zmm1[2],zmm0[2],zmm1[3],zmm0[3],zmm1[4],zmm0[4],zmm1[5],zmm0[5],zmm1[6],zmm0[6],zmm1[7],zmm0[7],zmm1[16],zmm0[16],zmm1[17],zmm0[17],zmm1[18],zmm0[18],zmm1[19],zmm0[19],zmm1[20],zmm0[20],zmm1[21],zmm0[21],zmm1[22],zmm0[22],zmm1[23],zmm0[23],zmm1[32],zmm0[32],zmm1[33],zmm0[33],zmm1[34],zmm0[34],zmm1[35],zmm0[35],zmm1[36],zmm0[36],zmm1[37],zmm0[37],zmm1[38],zmm0[38],zmm1[39],zmm0[39],zmm1[48],zmm0[48],zmm1[49],zmm0[49],zmm1[50],zmm0[50],zmm1[51],zmm0[51],zmm1[52],zmm0[52],zmm1[53],zmm0[53],zmm1[54],zmm0[54],zmm1[55],zmm0[55]
; AVX512VLVBMI2-NEXT: vpsrlw %xmm2, %zmm0, %zmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
index 59a510aa0298..f3bb1c3cef0d 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-128.ll
@@ -853,27 +853,15 @@ define <2 x i64> @splatvar_funnnel_v2i64(<2 x i64> %x, <2 x i64> %amt) nounwind
}
define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: psrlq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT: psrlq %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE41-NEXT: psrlq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE41-NEXT: psrlq %xmm1, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE-NEXT: psrlq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE-NEXT: psrlq %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,2],xmm2[0,2]
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v4i32:
; AVX: # %bb.0:
@@ -948,10 +936,8 @@ define <4 x i32> @splatvar_funnnel_v4i32(<4 x i32> %x, <4 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: psrlq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-SSE2-NEXT: psrlq %xmm1, %xmm0
@@ -1103,35 +1089,19 @@ define <8 x i16> @splatvar_funnnel_v8i16(<8 x i16> %x, <8 x i16> %amt) nounwind
}
define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind {
-; SSE2-LABEL: splatvar_funnnel_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psrlw %xmm1, %xmm2
-; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE2-NEXT: pand %xmm3, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psrlw %xmm1, %xmm0
-; SSE2-NEXT: pand %xmm3, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_funnnel_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: psrlw %xmm1, %xmm2
-; SSE41-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
-; SSE41-NEXT: pand %xmm3, %xmm2
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: psrlw %xmm1, %xmm0
-; SSE41-NEXT: pand %xmm3, %xmm0
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_funnnel_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT: psrlw %xmm1, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
+; SSE-NEXT: pand %xmm3, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: psrlw %xmm1, %xmm0
+; SSE-NEXT: pand %xmm3, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_funnnel_v16i8:
; AVX: # %bb.0:
@@ -1238,11 +1208,9 @@ define <16 x i8> @splatvar_funnnel_v16i8(<16 x i8> %x, <16 x i8> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v16i8:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psrlw %xmm1, %xmm2
; X86-SSE2-NEXT: movdqa {{.*#+}} xmm3 = [255,255,255,255,255,255,255,255]
; X86-SSE2-NEXT: pand %xmm3, %xmm2
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
index 267d9651d691..f80ce78b7982 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-256.ll
@@ -711,10 +711,8 @@ define <8 x i32> @splatvar_funnnel_v8i32(<8 x i32> %x, <8 x i32> %amt) nounwind
;
; AVX2-LABEL: splatvar_funnnel_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpsrlq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2-NEXT: vpsrlq %xmm1, %ymm0, %ymm0
@@ -935,7 +933,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX2-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -949,7 +946,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512F: # %bb.0:
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -963,7 +959,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VL: # %bb.0:
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -977,7 +972,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512BW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -991,7 +985,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512VLBW-NEXT: vmovdqa {{.*#+}} ymm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpand %ymm3, %ymm2, %ymm2
@@ -1006,7 +999,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VBMI2-NEXT: vmovdqa {{.*#+}} ymm2 = [0,2,4,6,8,10,12,14,64,66,68,70,72,74,76,78,16,18,20,22,24,26,28,30,80,82,84,86,88,90,92,94]
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VBMI2-NEXT: vpsrlw %xmm1, %ymm0, %ymm0
@@ -1018,7 +1010,6 @@ define <32 x i8> @splatvar_funnnel_v32i8(<32 x i8> %x, <32 x i8> %amt) nounwind
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsrlw %xmm1, %ymm2, %ymm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
; AVX512VLVBMI2-NEXT: vpsrlw %xmm1, %ymm0, %ymm1
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
index 4a09a5e62ecf..a322679fe46a 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-512.ll
@@ -369,10 +369,9 @@ define <32 x i16> @splatvar_funnnel_v32i16(<32 x i16> %x, <32 x i16> %amt) nounw
define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind {
; AVX512F-LABEL: splatvar_funnnel_v64i8:
; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512F-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512F-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -392,10 +391,9 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
;
; AVX512VL-LABEL: splatvar_funnnel_v64i8:
; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsrlw %xmm1, %ymm3, %ymm3
; AVX512VL-NEXT: vmovdqa {{.*#+}} ymm4 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VL-NEXT: vpand %ymm4, %ymm3, %ymm3
@@ -417,7 +415,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512BW-NEXT: vpandq %zmm3, %zmm2, %zmm2
@@ -431,7 +428,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vmovdqa64 {{.*#+}} zmm3 = [255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255]
; AVX512VLBW-NEXT: vpandq %zmm3, %zmm2, %zmm2
@@ -445,7 +441,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VBMI2-NEXT: vpsrlw %xmm1, %zmm0, %zmm1
@@ -457,7 +452,6 @@ define <64 x i8> @splatvar_funnnel_v64i8(<64 x i8> %x, <64 x i8> %amt) nounwind
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsrlw %xmm1, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
; AVX512VLVBMI2-NEXT: vpsrlw %xmm1, %zmm0, %zmm1
diff --git a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
index 911d72cdf9b5..a034790707f7 100644
--- a/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
+++ b/llvm/test/CodeGen/X86/vector-fshr-rot-sub128.ll
@@ -172,10 +172,8 @@ define <2 x i32> @var_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind {
; SSE2-LABEL: splatvar_funnnel_v2i32:
; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
; SSE2-NEXT: psrlq %xmm1, %xmm2
; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; SSE2-NEXT: psrlq %xmm1, %xmm0
@@ -275,10 +273,8 @@ define <2 x i32> @splatvar_funnnel_v2i32(<2 x i32> %x, <2 x i32> %amt) nounwind
;
; X86-SSE2-LABEL: splatvar_funnnel_v2i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: psrlq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-SSE2-NEXT: psrlq %xmm1, %xmm0
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
index 7a41ae1ea4b1..6755edfbe8a7 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-3.ll
@@ -240,8 +240,7 @@ define void @vf8(<24 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm6, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: por %xmm2, %xmm6
; SSE-NEXT: movaps %xmm1, (%rsi)
@@ -401,8 +400,7 @@ define void @vf16(<48 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm10[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: movdqa %xmm5, %xmm4
; SSE-NEXT: pandn %xmm3, %xmm4
; SSE-NEXT: por %xmm1, %xmm4
@@ -415,8 +413,7 @@ define void @vf16(<48 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm5, %xmm1
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm8[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm5
; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movaps %xmm13, 16(%rsi)
@@ -549,7 +546,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa 96(%rdi), %xmm11
; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 176(%rdi), %xmm7
-; SSE-NEXT: movdqa 144(%rdi), %xmm8
+; SSE-NEXT: movdqa 144(%rdi), %xmm9
; SSE-NEXT: movdqa 160(%rdi), %xmm5
; SSE-NEXT: movdqa 80(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
@@ -557,12 +554,12 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa 16(%rdi), %xmm10
; SSE-NEXT: movdqa 32(%rdi), %xmm13
; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 48(%rdi), %xmm9
+; SSE-NEXT: movdqa 48(%rdi), %xmm8
; SSE-NEXT: movdqa 64(%rdi), %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,0,65535,65535,0,65535,65535,0]
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: movdqa %xmm9, %xmm3
+; SSE-NEXT: movdqa %xmm8, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
@@ -579,7 +576,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa %xmm0, %xmm2
; SSE-NEXT: pandn %xmm5, %xmm2
-; SSE-NEXT: movdqa %xmm8, %xmm3
+; SSE-NEXT: movdqa %xmm9, %xmm3
; SSE-NEXT: pand %xmm0, %xmm3
; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm3[0,2,1,3]
@@ -631,11 +628,11 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm0, (%rsp) # 16-byte Spill
; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,0,65535,65535,0,65535,65535]
; SSE-NEXT: movdqa %xmm3, %xmm5
-; SSE-NEXT: pandn %xmm9, %xmm5
+; SSE-NEXT: pandn %xmm8, %xmm5
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: pandn %xmm15, %xmm1
-; SSE-NEXT: pand %xmm3, %xmm9
-; SSE-NEXT: por %xmm1, %xmm9
+; SSE-NEXT: pand %xmm3, %xmm8
+; SSE-NEXT: por %xmm1, %xmm8
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm15[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
@@ -643,7 +640,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: movdqa %xmm1, %xmm10
; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm9[2,1,2,3,4,5,6,7]
+; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
@@ -651,31 +648,31 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pand %xmm1, %xmm0
; SSE-NEXT: por %xmm0, %xmm10
; SSE-NEXT: movdqa %xmm3, %xmm4
-; SSE-NEXT: pandn %xmm8, %xmm4
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: movdqa %xmm3, %xmm8
-; SSE-NEXT: pandn %xmm12, %xmm8
+; SSE-NEXT: pandn %xmm9, %xmm4
+; SSE-NEXT: movdqa %xmm9, %xmm0
+; SSE-NEXT: movdqa %xmm3, %xmm9
+; SSE-NEXT: pandn %xmm12, %xmm9
; SSE-NEXT: pand %xmm3, %xmm0
-; SSE-NEXT: por %xmm8, %xmm0
+; SSE-NEXT: por %xmm9, %xmm0
; SSE-NEXT: movdqa %xmm7, %xmm13
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,4,5,6]
-; SSE-NEXT: movdqa %xmm1, %xmm8
-; SSE-NEXT: pandn %xmm7, %xmm8
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: pandn %xmm7, %xmm9
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[2,1,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,2,3,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,5,5,5,5]
; SSE-NEXT: pand %xmm1, %xmm0
-; SSE-NEXT: por %xmm0, %xmm8
+; SSE-NEXT: por %xmm0, %xmm9
; SSE-NEXT: movdqa %xmm3, %xmm7
; SSE-NEXT: pandn %xmm14, %xmm7
; SSE-NEXT: movdqa %xmm14, %xmm0
; SSE-NEXT: movdqa %xmm3, %xmm14
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pandn %xmm9, %xmm14
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pandn %xmm8, %xmm14
; SSE-NEXT: pand %xmm3, %xmm0
; SSE-NEXT: por %xmm14, %xmm0
; SSE-NEXT: pshuflw $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
@@ -715,8 +712,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: por %xmm5, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm15[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm5
; SSE-NEXT: pandn %xmm2, %xmm5
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,1,2,0]
@@ -730,8 +726,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: por %xmm4, %xmm2
; SSE-NEXT: movdqa %xmm2, %xmm6
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm13[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[3,1,2,0]
@@ -740,15 +735,14 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: pand %xmm3, %xmm9
-; SSE-NEXT: por %xmm7, %xmm9
+; SSE-NEXT: pand %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm8
; SSE-NEXT: pshufhw $236, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm2, %xmm6
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm9[3,1,2,0]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm8[3,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,6,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,0,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
@@ -764,8 +758,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[2,1,0,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm2
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm12[0,1,2,3,4,7,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,2]
; SSE-NEXT: pandn %xmm3, %xmm1
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
@@ -778,7 +771,7 @@ define void @vf32(<96 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.v
; SSE-NEXT: movaps %xmm2, 16(%rsi)
; SSE-NEXT: movdqa %xmm0, 32(%rdx)
; SSE-NEXT: movdqa %xmm14, (%rdx)
-; SSE-NEXT: movdqa %xmm8, 48(%rdx)
+; SSE-NEXT: movdqa %xmm9, 48(%rdx)
; SSE-NEXT: movdqa %xmm10, 16(%rdx)
; SSE-NEXT: movdqa %xmm1, 32(%rcx)
; SSE-NEXT: movdqa %xmm6, (%rcx)
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
index 144b1b12f0ea..a104919603f1 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i16-stride-6.ll
@@ -424,24 +424,24 @@ define void @vf8(<48 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[1,1,1,1]
; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm0[0]
-; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm6, %xmm0
-; SSE-NEXT: pandn %xmm7, %xmm0
+; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,65535]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm7, %xmm6
; SSE-NEXT: movdqa %xmm15, %xmm7
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm13[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm13[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE-NEXT: pand %xmm6, %xmm1
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm3[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm5[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm3[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movdqa %xmm6, %xmm3
; SSE-NEXT: pandn %xmm8, %xmm3
; SSE-NEXT: por %xmm1, %xmm3
; SSE-NEXT: movdqa %xmm10, %xmm1
@@ -450,49 +450,48 @@ define void @vf8(<48 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm7[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: pandn %xmm12, %xmm6
-; SSE-NEXT: por %xmm1, %xmm6
-; SSE-NEXT: pand %xmm0, %xmm6
+; SSE-NEXT: pand %xmm0, %xmm1
+; SSE-NEXT: pandn %xmm12, %xmm0
+; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm5[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE-NEXT: movdqa %xmm6, %xmm5
; SSE-NEXT: pandn %xmm1, %xmm5
-; SSE-NEXT: por %xmm6, %xmm5
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm15[2,3,2,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,4,6]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm10[1]
-; SSE-NEXT: movss {{.*#+}} xmm1 = xmm6[0],xmm1[1,2,3]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm4[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa %xmm0, %xmm7
-; SSE-NEXT: pandn %xmm6, %xmm7
-; SSE-NEXT: por %xmm1, %xmm7
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm13[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[2,3,2,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,4,5,4,6]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm10[1]
+; SSE-NEXT: movss {{.*#+}} xmm0 = xmm1[0],xmm0[1,2,3]
+; SSE-NEXT: andps %xmm6, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,4,6]
+; SSE-NEXT: movdqa %xmm6, %xmm7
+; SSE-NEXT: pandn %xmm1, %xmm7
+; SSE-NEXT: por %xmm0, %xmm7
; SSE-NEXT: psrlq $48, %xmm13
; SSE-NEXT: psrldq {{.*#+}} xmm15 = xmm15[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm13[0],xmm15[1],xmm13[1],xmm15[2],xmm13[2],xmm15[3],xmm13[3]
-; SSE-NEXT: movdqa %xmm10, %xmm6
-; SSE-NEXT: psrld $16, %xmm6
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,5,7]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
-; SSE-NEXT: movss {{.*#+}} xmm1 = xmm15[0],xmm1[1,2,3]
-; SSE-NEXT: andps %xmm0, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7]
-; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: por %xmm1, %xmm0
+; SSE-NEXT: movdqa %xmm10, %xmm1
+; SSE-NEXT: psrld $16, %xmm1
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm9[0,1,2,3,4,5,5,7]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
+; SSE-NEXT: movss {{.*#+}} xmm0 = xmm15[0],xmm0[1,2,3]
+; SSE-NEXT: andps %xmm6, %xmm0
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,4,5,7]
+; SSE-NEXT: pandn %xmm1, %xmm6
+; SSE-NEXT: por %xmm0, %xmm6
; SSE-NEXT: movaps %xmm11, (%rsi)
; SSE-NEXT: movaps %xmm2, (%rdx)
; SSE-NEXT: movdqa %xmm3, (%rcx)
; SSE-NEXT: movdqa %xmm5, (%r8)
; SSE-NEXT: movdqa %xmm7, (%r9)
-; SSE-NEXT: movdqa %xmm0, (%rax)
+; SSE-NEXT: movdqa %xmm6, (%rax)
; SSE-NEXT: retq
;
; AVX1-LABEL: vf8:
@@ -808,257 +807,259 @@ define void @vf8(<48 x i16>* %in.vec, <8 x i16>* %out.vec0, <8 x i16>* %out.vec1
define void @vf16(<96 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.vec1, <16 x i16>* %out.vec2, <16 x i16>* %out.vec3, <16 x i16>* %out.vec4, <16 x i16>* %out.vec5) nounwind {
; SSE-LABEL: vf16:
; SSE: # %bb.0:
-; SSE-NEXT: subq $104, %rsp
-; SSE-NEXT: movdqa 160(%rdi), %xmm0
+; SSE-NEXT: subq $88, %rsp
+; SSE-NEXT: movdqa 160(%rdi), %xmm4
; SSE-NEXT: movdqa 176(%rdi), %xmm12
-; SSE-NEXT: movdqa 64(%rdi), %xmm2
+; SSE-NEXT: movdqa 64(%rdi), %xmm0
; SSE-NEXT: movdqa 80(%rdi), %xmm7
-; SSE-NEXT: movdqa (%rdi), %xmm14
-; SSE-NEXT: movdqa 16(%rdi), %xmm11
+; SSE-NEXT: movaps (%rdi), %xmm1
+; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 16(%rdi), %xmm5
; SSE-NEXT: movdqa 32(%rdi), %xmm9
; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: psrlq $16, %xmm9
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm1[0,1,0,2,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm2[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm7[0],xmm3[1],xmm7[1],xmm3[2],xmm7[2],xmm3[3],xmm7[3]
-; SSE-NEXT: movdqa %xmm7, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[3,0]
-; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm1[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm6[0,1,0,2,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm0[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm15 = xmm15[0],xmm7[0],xmm15[1],xmm7[1],xmm15[2],xmm7[2],xmm15[3],xmm7[3]
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm0[3,0]
; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm7[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm7[2,3]
-; SSE-NEXT: movaps %xmm2, (%rsp) # 16-byte Spill
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm7[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm7[2,3]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pslld $16, %xmm7
; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm7[0],xmm1[1],xmm7[1],xmm1[2],xmm7[2],xmm1[3],xmm7[3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm10[1,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
+; SSE-NEXT: # xmm11 = mem[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm11[0,1,2,3,4,6,6,7]
; SSE-NEXT: movdqa %xmm5, %xmm2
-; SSE-NEXT: pandn %xmm10, %xmm2
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm14[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm11[2],xmm4[3],xmm11[3]
-; SSE-NEXT: pand %xmm5, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[2,0]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm5[2],xmm7[3],xmm5[3]
+; SSE-NEXT: pand %xmm3, %xmm7
+; SSE-NEXT: por %xmm0, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm1[2,0]
+; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm4, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm12[0],xmm0[1],xmm12[1],xmm0[2],xmm12[2],xmm0[3],xmm12[3]
+; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm1[3,0]
; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm12[0],xmm2[1],xmm12[1],xmm2[2],xmm12[2],xmm2[3],xmm12[3]
-; SSE-NEXT: movdqa %xmm12, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[3,0]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm12[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm12[2,3]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm0, %xmm4
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm12[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm12[2,3]
-; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pslld $16, %xmm12
; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm12[0],xmm4[1],xmm12[1],xmm4[2],xmm12[2],xmm4[3],xmm12[3]
-; SSE-NEXT: movdqa 128(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 128(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm5, %xmm1
+; SSE-NEXT: movdqa %xmm5, %xmm14
; SSE-NEXT: psrlq $16, %xmm1
-; SSE-NEXT: movdqa 144(%rdi), %xmm0
-; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm0[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm10[0,1,0,2,4,5,6,7]
+; SSE-NEXT: movdqa 144(%rdi), %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm5[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm8 = xmm12[0,1,0,2,4,5,6,7]
; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[3,1],xmm8[1,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm8 = xmm8[0],xmm1[0],xmm8[1],xmm1[1]
-; SSE-NEXT: movdqa %xmm5, %xmm7
+; SSE-NEXT: movdqa %xmm3, %xmm7
; SSE-NEXT: pandn %xmm8, %xmm7
-; SSE-NEXT: movdqa 112(%rdi), %xmm12
+; SSE-NEXT: movdqa 112(%rdi), %xmm10
; SSE-NEXT: movdqa 96(%rdi), %xmm13
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm13[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm6[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm12[2],xmm9[3],xmm12[3]
-; SSE-NEXT: pand %xmm5, %xmm9
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm5[0,1,2,3,4,6,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm9 = xmm9[2],xmm10[2],xmm9[3],xmm10[3]
+; SSE-NEXT: pand %xmm3, %xmm9
; SSE-NEXT: por %xmm7, %xmm9
; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[0,1],xmm4[2,0]
-; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,1,1,3,4,5,6,7]
-; SSE-NEXT: movdqa %xmm3, %xmm0
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm4[1,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm6[0,1,1,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm15, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[3,1],xmm4[1,3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm1[1,1,1,1]
; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: movdqa %xmm3, %xmm7
+; SSE-NEXT: pandn %xmm4, %xmm7
+; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: psrld $16, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm15[0,1,2,3,5,7,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm8 = xmm11[0,1,2,3,5,7,6,7]
; SSE-NEXT: punpckhdq {{.*#+}} xmm8 = xmm8[2],xmm4[2],xmm8[3],xmm4[3]
-; SSE-NEXT: pand %xmm5, %xmm8
-; SSE-NEXT: por %xmm3, %xmm8
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm0[2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm10[0,1,1,3,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm3[1,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm15[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm4[0],xmm3[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm12, %xmm4
-; SSE-NEXT: psrld $16, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm6[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
-; SSE-NEXT: pand %xmm5, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm5
-; SSE-NEXT: por %xmm0, %xmm5
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[0,1],xmm2[2,0]
-; SSE-NEXT: movdqa %xmm1, %xmm2
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm0[0]
+; SSE-NEXT: pand %xmm3, %xmm8
+; SSE-NEXT: por %xmm7, %xmm8
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[0,1],xmm6[2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm12[0,1,1,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm4[1,3]
+; SSE-NEXT: movdqa %xmm14, %xmm11
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm14[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1]
+; SSE-NEXT: movdqa %xmm10, %xmm6
+; SSE-NEXT: psrld $16, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm6[2],xmm5[3],xmm6[3]
+; SSE-NEXT: pand %xmm3, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm3
+; SSE-NEXT: por %xmm5, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm0[2,0]
+; SSE-NEXT: movdqa %xmm1, %xmm4
+; SSE-NEXT: movdqa %xmm1, %xmm14
+; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm12 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm0[0]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [65535,65535,65535,0,0,65535,65535,65535]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm11[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm11[2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,2,2,3,4,5,6,7]
+; SSE-NEXT: movdqa %xmm0, %xmm6
+; SSE-NEXT: pandn %xmm4, %xmm6
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[1,0],xmm2[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[2,0],xmm2[2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm7[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[1,0,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm4
-; SSE-NEXT: por %xmm3, %xmm4
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm4[1,0,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: por %xmm6, %xmm5
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = xmm1[0,1],mem[0,2]
; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm11 = xmm3[0,1,2,3,4,6,5,4]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm1[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,6,5,4]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,65535,65535,65535,65535,0,0,0]
; SSE-NEXT: movdqa %xmm6, %xmm1
-; SSE-NEXT: pandn %xmm11, %xmm1
-; SSE-NEXT: pand %xmm6, %xmm4
-; SSE-NEXT: por %xmm4, %xmm1
+; SSE-NEXT: pandn %xmm4, %xmm1
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: por %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm15, %xmm4
+; SSE-NEXT: movdqa %xmm11, %xmm4
+; SSE-NEXT: movdqa %xmm11, (%rsp) # 16-byte Spill
; SSE-NEXT: psrldq {{.*#+}} xmm4 = xmm4[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[1,1,1,1]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm4, %xmm3
-; SSE-NEXT: movdqa %xmm13, %xmm11
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[1,0],xmm12[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm11 = xmm11[2,0],xmm12[2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm2[1,1,1,1]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm5[0]
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: movdqa %xmm13, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm10[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm10[2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm4[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm15[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: por %xmm3, %xmm1
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = xmm3[0,1],mem[0,2]
-; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa %xmm6, %xmm14
-; SSE-NEXT: pandn %xmm3, %xmm14
+; SSE-NEXT: por %xmm5, %xmm1
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
+; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: # xmm5 = xmm5[0,1],mem[0,2]
+; SSE-NEXT: movaps %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,6,5,4]
+; SSE-NEXT: movdqa %xmm6, %xmm15
+; SSE-NEXT: pandn %xmm5, %xmm15
; SSE-NEXT: pand %xmm6, %xmm1
-; SSE-NEXT: por %xmm1, %xmm14
-; SSE-NEXT: psrlq $48, %xmm10
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm10[0]
+; SSE-NEXT: por %xmm1, %xmm15
+; SSE-NEXT: movdqa %xmm12, %xmm1
+; SSE-NEXT: psrlq $48, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm14[2,2,3,3]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm1[0]
; SSE-NEXT: movdqa %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm1
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm2[0,1,3,3,4,5,6,7]
-; SSE-NEXT: pand %xmm0, %xmm3
-; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm5, %xmm1
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm7[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[0,1,3,3,4,5,6,7]
+; SSE-NEXT: pand %xmm0, %xmm5
+; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm6, %xmm10
-; SSE-NEXT: pandn %xmm1, %xmm10
-; SSE-NEXT: pand %xmm6, %xmm3
-; SSE-NEXT: por %xmm3, %xmm10
-; SSE-NEXT: movdqa %xmm7, %xmm2
-; SSE-NEXT: movdqa %xmm7, %xmm1
-; SSE-NEXT: psrlq $48, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm15[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm1[0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm11[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
+; SSE-NEXT: movdqa %xmm6, %xmm12
+; SSE-NEXT: pandn %xmm1, %xmm12
+; SSE-NEXT: pand %xmm6, %xmm5
+; SSE-NEXT: por %xmm5, %xmm12
+; SSE-NEXT: psrlq $48, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[2,2,3,3]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm2[0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm4[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,1,3,3,4,5,6,7]
; SSE-NEXT: pand %xmm0, %xmm1
-; SSE-NEXT: pandn %xmm3, %xmm0
+; SSE-NEXT: pandn %xmm5, %xmm0
; SSE-NEXT: por %xmm1, %xmm0
; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
; SSE-NEXT: # xmm1 = mem[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,7,4,5]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,2]
; SSE-NEXT: movdqa %xmm6, %xmm11
; SSE-NEXT: pandn %xmm1, %xmm11
; SSE-NEXT: pand %xmm6, %xmm0
; SSE-NEXT: por %xmm0, %xmm11
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
-; SSE-NEXT: # xmm0 = mem[1,1,1,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm3[2,3,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,1,1]
+; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm4[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm15[0,1,2,3,4,5,4,6]
-; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = xmm4[1],mem[1]
-; SSE-NEXT: movss {{.*#+}} xmm4 = xmm1[0],xmm4[1,2,3]
-; SSE-NEXT: pshuflw $232, (%rsp), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm14 = xmm14[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm14[0,1,2,3,4,5,4,6]
+; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: # xmm5 = xmm5[1],mem[1]
+; SSE-NEXT: movss {{.*#+}} xmm5 = xmm1[0],xmm5[1,2,3]
+; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
; SSE-NEXT: movdqa %xmm6, %xmm7
; SSE-NEXT: pandn %xmm0, %xmm7
-; SSE-NEXT: andps %xmm6, %xmm4
-; SSE-NEXT: por %xmm4, %xmm7
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm12[1,1,1,1]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,3,2,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm0[0],xmm4[1],xmm0[1],xmm4[2],xmm0[2],xmm4[3],xmm0[3]
-; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: andps %xmm6, %xmm5
+; SSE-NEXT: por %xmm5, %xmm7
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm10[1,1,1,1]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm13[2,3,2,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
+; SSE-NEXT: pshufd $196, (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = mem[0,1,0,3]
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,4,6]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm2[1]
-; SSE-NEXT: movss {{.*#+}} xmm0 = xmm4[0],xmm0[1,2,3]
-; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,5,4,6]
+; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = xmm0[1],mem[1]
+; SSE-NEXT: movss {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3]
+; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: # xmm5 = mem[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,5,4,6]
; SSE-NEXT: movdqa %xmm6, %xmm2
-; SSE-NEXT: pandn %xmm4, %xmm2
+; SSE-NEXT: pandn %xmm5, %xmm2
; SSE-NEXT: andps %xmm6, %xmm0
; SSE-NEXT: por %xmm0, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: psrlq $48, %xmm1
-; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm1[0],xmm3[1],xmm1[1],xmm3[2],xmm1[2],xmm3[3],xmm1[3]
+; SSE-NEXT: psrlq $48, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm4[0],xmm0[1],xmm4[1],xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: movdqa %xmm0, %xmm5
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,4,5,5,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm14[0,1,2,3,4,5,5,7]
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
-; SSE-NEXT: movss {{.*#+}} xmm0 = xmm3[0],xmm0[1,2,3]
-; SSE-NEXT: pshuflw $231, (%rsp), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
+; SSE-NEXT: movss {{.*#+}} xmm0 = xmm5[0],xmm0[1,2,3]
+; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7]
+; SSE-NEXT: movdqa %xmm6, %xmm5
+; SSE-NEXT: pandn %xmm4, %xmm5
; SSE-NEXT: andps %xmm6, %xmm0
-; SSE-NEXT: por %xmm0, %xmm4
-; SSE-NEXT: psrlq $48, %xmm12
+; SSE-NEXT: por %xmm0, %xmm5
+; SSE-NEXT: psrlq $48, %xmm10
; SSE-NEXT: psrldq {{.*#+}} xmm13 = xmm13[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm12[0],xmm13[1],xmm12[1],xmm13[2],xmm12[2],xmm13[3],xmm12[3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm13 = xmm13[0],xmm10[0],xmm13[1],xmm10[1],xmm13[2],xmm10[2],xmm13[3],xmm10[3]
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: psrld $16, %xmm1
; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
@@ -1066,28 +1067,28 @@ define void @vf16(<96 x i16>* %in.vec, <16 x i16>* %out.vec0, <16 x i16>* %out.v
; SSE-NEXT: punpckhqdq {{.*#+}} xmm0 = xmm0[1],xmm1[1]
; SSE-NEXT: movss {{.*#+}} xmm0 = xmm13[0],xmm0[1,2,3]
; SSE-NEXT: andps %xmm6, %xmm0
-; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
-; SSE-NEXT: # xmm3 = mem[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,4,5,7]
-; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
+; SSE-NEXT: # xmm4 = mem[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,4,5,7]
+; SSE-NEXT: pandn %xmm4, %xmm6
; SSE-NEXT: por %xmm0, %xmm6
; SSE-NEXT: movaps %xmm9, 16(%rsi)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rsi)
-; SSE-NEXT: movaps %xmm5, 16(%rdx)
+; SSE-NEXT: movaps %xmm3, 16(%rdx)
; SSE-NEXT: movaps %xmm8, (%rdx)
-; SSE-NEXT: movdqa %xmm14, 16(%rcx)
+; SSE-NEXT: movdqa %xmm15, 16(%rcx)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%rcx)
; SSE-NEXT: movdqa %xmm11, 16(%r8)
-; SSE-NEXT: movdqa %xmm10, (%r8)
+; SSE-NEXT: movdqa %xmm12, (%r8)
; SSE-NEXT: movdqa %xmm2, 16(%r9)
; SSE-NEXT: movdqa %xmm7, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
; SSE-NEXT: movdqa %xmm6, 16(%rax)
-; SSE-NEXT: movdqa %xmm4, (%rax)
-; SSE-NEXT: addq $104, %rsp
+; SSE-NEXT: movdqa %xmm5, (%rax)
+; SSE-NEXT: addq $88, %rsp
; SSE-NEXT: retq
;
; AVX1-LABEL: vf16:
@@ -1655,13 +1656,13 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-LABEL: vf32:
; SSE: # %bb.0:
; SSE-NEXT: subq $456, %rsp # imm = 0x1C8
-; SSE-NEXT: movdqa 256(%rdi), %xmm3
-; SSE-NEXT: movdqa 272(%rdi), %xmm0
-; SSE-NEXT: movdqa 64(%rdi), %xmm2
+; SSE-NEXT: movdqa 256(%rdi), %xmm2
+; SSE-NEXT: movdqa 272(%rdi), %xmm13
+; SSE-NEXT: movdqa 64(%rdi), %xmm0
; SSE-NEXT: movdqa 80(%rdi), %xmm6
-; SSE-NEXT: movaps (%rdi), %xmm1
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 16(%rdi), %xmm14
+; SSE-NEXT: movdqa (%rdi), %xmm4
+; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 16(%rdi), %xmm7
; SSE-NEXT: movdqa 32(%rdi), %xmm9
; SSE-NEXT: movdqa %xmm9, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa 48(%rdi), %xmm1
@@ -1670,223 +1671,223 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshuflw {{.*#+}} xmm10 = xmm1[0,1,0,2,4,5,6,7]
-; SSE-NEXT: movdqa %xmm2, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm2[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm4 = xmm4[0],xmm6[0],xmm4[1],xmm6[1],xmm4[2],xmm6[2],xmm4[3],xmm6[3]
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm2[3,0]
-; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[1,0],xmm6[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm6[2,3]
-; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm0[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm6[0],xmm3[1],xmm6[1],xmm3[2],xmm6[2],xmm3[3],xmm6[3]
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm6, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[3,0]
+; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm6[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm6[2,3]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pslld $16, %xmm6
; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm6[0],xmm1[1],xmm6[1],xmm1[2],xmm6[2],xmm1[3],xmm6[3]
; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm10[1,3]
; SSE-NEXT: punpckldq {{.*#+}} xmm10 = xmm10[0],xmm9[0],xmm10[1],xmm9[1]
-; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,0,0,0,65535,65535]
-; SSE-NEXT: movdqa %xmm12, %xmm2
-; SSE-NEXT: pandn %xmm10, %xmm2
-; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm6[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm4 = xmm4[2],xmm14[2],xmm4[3],xmm14[3]
-; SSE-NEXT: pand %xmm12, %xmm4
-; SSE-NEXT: por %xmm2, %xmm4
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[0,1],xmm1[2,0]
-; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm1
-; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm3[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm9 = xmm9[0],xmm0[0],xmm9[1],xmm0[1],xmm9[2],xmm0[2],xmm9[3],xmm0[3]
-; SSE-NEXT: movdqa %xmm0, %xmm2
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[2,0],xmm3[3,0]
-; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm3, %xmm2
+; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,0,0,0,65535,65535]
+; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pandn %xmm10, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm4[0,1,0,3]
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm7[2],xmm3[3],xmm7[3]
+; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: por %xmm0, %xmm3
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm1[2,0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm0[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm0[2,3]
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pslld $16, %xmm0
-; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm0[0],xmm2[1],xmm0[1],xmm2[2],xmm0[2],xmm2[3],xmm0[3]
-; SSE-NEXT: movdqa 224(%rdi), %xmm8
-; SSE-NEXT: movdqa %xmm8, %xmm0
-; SSE-NEXT: psrlq $16, %xmm0
-; SSE-NEXT: movdqa 240(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,3,2,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm1[0,1,0,2,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm3[1,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm0[0],xmm3[1],xmm0[1]
-; SSE-NEXT: movdqa %xmm12, %xmm0
-; SSE-NEXT: pandn %xmm3, %xmm0
-; SSE-NEXT: movdqa 208(%rdi), %xmm11
-; SSE-NEXT: movdqa 192(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm11[2],xmm1[3],xmm11[3]
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: por %xmm0, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm2[2,0]
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 160(%rdi), %xmm1
+; SSE-NEXT: movdqa %xmm2, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm2[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3]
+; SSE-NEXT: movdqa %xmm13, %xmm1
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[3,0]
+; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm2, %xmm1
+; SSE-NEXT: movaps %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm13[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm13[2,3]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pslld $16, %xmm13
+; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm13[0],xmm1[1],xmm13[1],xmm1[2],xmm13[2],xmm1[3],xmm13[3]
+; SSE-NEXT: movdqa 224(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $16, %xmm2
+; SSE-NEXT: movdqa 240(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm11 = xmm0[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[0,1,0,2,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm3[1,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: movdqa 208(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 192(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,0,3]
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm1[2,0]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 160(%rdi), %xmm0
; SSE-NEXT: movdqa 176(%rdi), %xmm2
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm1[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[3,0]
+; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm0[3,0]
; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm3
-; SSE-NEXT: movaps %xmm1, (%rsp) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm2[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm2[2,3]
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm3
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm2[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm2[2,3]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pslld $16, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE-NEXT: movdqa 128(%rdi), %xmm5
-; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $16, %xmm5
-; SSE-NEXT: movdqa 144(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm2[0,1,0,2,4,5,6,7]
+; SSE-NEXT: movdqa 128(%rdi), %xmm2
+; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $16, %xmm2
+; SSE-NEXT: movdqa 144(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm0[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm8[0,1,0,2,4,5,6,7]
; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[3,1],xmm4[1,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm5[0],xmm4[1],xmm5[1]
-; SSE-NEXT: movdqa %xmm12, %xmm5
-; SSE-NEXT: pandn %xmm4, %xmm5
+; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm2[0],xmm4[1],xmm2[1]
+; SSE-NEXT: movdqa %xmm14, %xmm2
+; SSE-NEXT: pandn %xmm4, %xmm2
; SSE-NEXT: movdqa 112(%rdi), %xmm4
; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 96(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm13 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm13[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm4[2],xmm1[3],xmm4[3]
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: por %xmm5, %xmm1
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[0,1],xmm3[2,0]
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 352(%rdi), %xmm1
-; SSE-NEXT: movdqa 368(%rdi), %xmm4
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm1[2,2,3,3]
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm4[0],xmm5[1],xmm4[1],xmm5[2],xmm4[2],xmm5[3],xmm4[3]
-; SSE-NEXT: movdqa %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[2,0],xmm1[3,0]
-; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[1,0],xmm4[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[2,0],xmm4[2,3]
-; SSE-NEXT: movaps %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pslld $16, %xmm4
-; SSE-NEXT: psrldq {{.*#+}} xmm7 = xmm7[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm7 = xmm7[0],xmm4[0],xmm7[1],xmm4[1],xmm7[2],xmm4[2],xmm7[3],xmm4[3]
-; SSE-NEXT: movdqa 320(%rdi), %xmm4
-; SSE-NEXT: movdqa %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $16, %xmm4
-; SSE-NEXT: movdqa 336(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm10 = xmm1[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm15 = xmm10[0,1,0,2,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[3,1],xmm15[1,3]
-; SSE-NEXT: punpckldq {{.*#+}} xmm15 = xmm15[0],xmm4[0],xmm15[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm12, %xmm4
-; SSE-NEXT: pandn %xmm15, %xmm4
-; SSE-NEXT: movdqa 304(%rdi), %xmm15
-; SSE-NEXT: movdqa %xmm15, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa 288(%rdi), %xmm1
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm1[0,1,2,3,4,6,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm3 = xmm3[2],xmm15[2],xmm3[3],xmm15[3]
-; SSE-NEXT: pand %xmm12, %xmm3
-; SSE-NEXT: por %xmm4, %xmm3
-; SSE-NEXT: shufps {{.*#+}} xmm3 = xmm3[0,1],xmm7[2,0]
-; SSE-NEXT: movaps %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,1,1,3,4,5,6,7]
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm15 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[3,1],xmm4[1,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm3[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm12, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: psrld $16, %xmm4
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; SSE-NEXT: pand %xmm12, %xmm6
-; SSE-NEXT: por %xmm7, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm15[2,0]
-; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[0,1,1,3,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm9 = xmm9[3,1],xmm4[1,3]
-; SSE-NEXT: movdqa %xmm8, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm4 = xmm4[0],xmm7[0],xmm4[1],xmm7[1]
-; SSE-NEXT: movdqa %xmm12, %xmm7
-; SSE-NEXT: pandn %xmm4, %xmm7
-; SSE-NEXT: movdqa %xmm11, %xmm4
-; SSE-NEXT: psrld $16, %xmm4
-; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm4[2],xmm6[3],xmm4[3]
-; SSE-NEXT: pand %xmm12, %xmm6
-; SSE-NEXT: por %xmm7, %xmm6
-; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm9[2,0]
+; SSE-NEXT: movdqa 96(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm15 = xmm0[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm15[0,1,2,3,4,6,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm4[2],xmm0[3],xmm4[3]
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: por %xmm2, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm3[2,0]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa 352(%rdi), %xmm0
+; SSE-NEXT: movdqa 368(%rdi), %xmm3
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm3[0],xmm2[1],xmm3[1],xmm2[2],xmm3[2],xmm2[3],xmm3[3]
+; SSE-NEXT: movdqa %xmm3, %xmm4
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm0[3,0]
+; SSE-NEXT: movaps %xmm4, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm0, %xmm5
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,0],xmm3[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[2,0],xmm3[2,3]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pslld $16, %xmm3
+; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm3[0],xmm5[1],xmm3[1],xmm5[2],xmm3[2],xmm5[3],xmm3[3]
+; SSE-NEXT: movdqa 320(%rdi), %xmm3
+; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $16, %xmm3
+; SSE-NEXT: movdqa 336(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm9 = xmm0[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm12 = xmm9[0,1,0,2,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm12[1,3]
+; SSE-NEXT: punpckldq {{.*#+}} xmm12 = xmm12[0],xmm3[0],xmm12[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: pandn %xmm12, %xmm3
+; SSE-NEXT: movdqa 304(%rdi), %xmm13
+; SSE-NEXT: movdqa 288(%rdi), %xmm0
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshufd {{.*#+}} xmm12 = xmm0[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm12[0,1,2,3,4,6,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm6 = xmm6[2],xmm13[2],xmm6[3],xmm13[3]
+; SSE-NEXT: pand %xmm14, %xmm6
+; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: shufps {{.*#+}} xmm6 = xmm6[0,1],xmm5[2,0]
; SSE-NEXT: movaps %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,1,3,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm2[1,3]
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
-; SSE-NEXT: # xmm4 = mem[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[1],xmm4[1]
-; SSE-NEXT: movdqa %xmm12, %xmm4
-; SSE-NEXT: pandn %xmm2, %xmm4
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm13[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm2[2],xmm15[3],xmm2[3]
-; SSE-NEXT: pand %xmm12, %xmm15
-; SSE-NEXT: por %xmm4, %xmm15
-; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm0[2,0]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm10[0,1,1,3,4,5,6,7]
-; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[3,1],xmm0[1,3]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[1,1,1,1]
-; SSE-NEXT: punpckldq {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm10, %xmm2
-; SSE-NEXT: psrld $16, %xmm2
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,7,6,7]
-; SSE-NEXT: punpckhdq {{.*#+}} xmm1 = xmm1[2],xmm2[2],xmm1[3],xmm2[3]
-; SSE-NEXT: pand %xmm12, %xmm1
-; SSE-NEXT: pandn %xmm0, %xmm12
-; SSE-NEXT: por %xmm1, %xmm12
-; SSE-NEXT: shufps {{.*#+}} xmm12 = xmm12[0,1],xmm5[2,0]
-; SSE-NEXT: movdqa %xmm3, %xmm0
+; SSE-NEXT: pshuflw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[0,1,1,3,4,5,6,7]
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[3,1],xmm3[1,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm4[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm14, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm7, %xmm6
+; SSE-NEXT: movdqa %xmm7, %xmm3
+; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
+; SSE-NEXT: # xmm7 = mem[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm7 = xmm7[2],xmm3[2],xmm7[3],xmm3[3]
+; SSE-NEXT: pand %xmm14, %xmm7
+; SSE-NEXT: por %xmm5, %xmm7
+; SSE-NEXT: shufps {{.*#+}} xmm7 = xmm7[0,1],xmm0[2,0]
+; SSE-NEXT: movaps %xmm7, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm11[0,1,1,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm10 = xmm10[3,1],xmm3[1,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm7[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm14, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm11, %xmm3
+; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: pshufhw $237, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: # xmm0 = mem[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: por %xmm5, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[0,1],xmm10[2,0]
+; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,1,1,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm1 = xmm1[3,1],xmm3[1,3]
+; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
+; SSE-NEXT: # xmm5 = mem[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[1],xmm5[1]
+; SSE-NEXT: movdqa %xmm14, %xmm5
+; SSE-NEXT: pandn %xmm3, %xmm5
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Reload
+; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm15 = xmm15[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm15 = xmm15[2],xmm3[2],xmm15[3],xmm3[3]
+; SSE-NEXT: pand %xmm14, %xmm15
+; SSE-NEXT: por %xmm5, %xmm15
+; SSE-NEXT: shufps {{.*#+}} xmm15 = xmm15[0,1],xmm1[2,0]
+; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm9[0,1,1,3,4,5,6,7]
+; SSE-NEXT: shufps {{.*#+}} xmm2 = xmm2[3,1],xmm1[1,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm8[1,1,1,1]
+; SSE-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm3[0],xmm1[1],xmm3[1]
+; SSE-NEXT: movdqa %xmm13, %xmm3
+; SSE-NEXT: psrld $16, %xmm3
+; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm12[0,1,2,3,5,7,6,7]
+; SSE-NEXT: punpckhdq {{.*#+}} xmm0 = xmm0[2],xmm3[2],xmm0[3],xmm3[3]
+; SSE-NEXT: pand %xmm14, %xmm0
+; SSE-NEXT: pandn %xmm1, %xmm14
+; SSE-NEXT: por %xmm0, %xmm14
+; SSE-NEXT: shufps {{.*#+}} xmm14 = xmm14[0,1],xmm2[2,0]
+; SSE-NEXT: movdqa %xmm4, %xmm0
; SSE-NEXT: psrldq {{.*#+}} xmm0 = xmm0[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[1,1,1,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm10[1,1,1,1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE-NEXT: movdqa {{.*#+}} xmm1 = [65535,65535,65535,0,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm1, %xmm2
; SSE-NEXT: pandn %xmm0, %xmm2
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm14, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm14[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm14[2,3]
+; SSE-NEXT: movdqa %xmm6, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[1,0],xmm6[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm4 = xmm4[2,0],xmm6[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm4[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm0[1,0,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm1, %xmm5
-; SSE-NEXT: por %xmm2, %xmm5
+; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm0[1,0,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm3
+; SSE-NEXT: por %xmm2, %xmm3
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
@@ -1894,71 +1895,72 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa {{.*#+}} xmm14 = [65535,65535,65535,65535,65535,0,0,0]
-; SSE-NEXT: movdqa %xmm14, %xmm3
+; SSE-NEXT: movdqa {{.*#+}} xmm12 = [65535,65535,65535,65535,65535,0,0,0]
+; SSE-NEXT: movdqa %xmm12, %xmm5
+; SSE-NEXT: pandn %xmm2, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm3
+; SSE-NEXT: por %xmm3, %xmm5
+; SSE-NEXT: movdqa %xmm5, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm7, %xmm2
+; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
+; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
+; SSE-NEXT: # xmm3 = mem[1,1,1,1]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT: movdqa %xmm1, %xmm3
; SSE-NEXT: pandn %xmm2, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm5
-; SSE-NEXT: por %xmm5, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrldq {{.*#+}} xmm8 = xmm8[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm9[1,1,1,1]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm5[0]
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: pandn %xmm8, %xmm7
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm11, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[1,0],xmm11[0,0]
; SSE-NEXT: shufps {{.*#+}} xmm5 = xmm5[2,0],xmm11[2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm5[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm2
-; SSE-NEXT: por %xmm7, %xmm2
+; SSE-NEXT: por %xmm3, %xmm2
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
-; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
+; SSE-NEXT: shufps $132, (%rsp), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm7, %xmm3
-; SSE-NEXT: pand %xmm14, %xmm2
-; SSE-NEXT: por %xmm2, %xmm3
-; SSE-NEXT: movdqa %xmm3, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: movdqa %xmm6, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
+; SSE-NEXT: movdqa %xmm12, %xmm6
+; SSE-NEXT: pandn %xmm3, %xmm6
+; SSE-NEXT: pand %xmm12, %xmm2
+; SSE-NEXT: por %xmm2, %xmm6
+; SSE-NEXT: movdqa %xmm6, (%rsp) # 16-byte Spill
+; SSE-NEXT: movdqa %xmm8, %xmm2
; SSE-NEXT: psrldq {{.*#+}} xmm2 = xmm2[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: # xmm7 = mem[1,1,1,1]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm7[0]
-; SSE-NEXT: movdqa %xmm1, %xmm7
-; SSE-NEXT: pandn %xmm2, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm9[1,1,1,1]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm3[0]
+; SSE-NEXT: movdqa %xmm1, %xmm3
+; SSE-NEXT: pandn %xmm2, %xmm3
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm8 # 16-byte Reload
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm10[0,0]
-; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm10[2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm8[0,2,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,3,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,0,2,3,4,5,6,7]
-; SSE-NEXT: pand %xmm1, %xmm3
-; SSE-NEXT: por %xmm7, %xmm3
+; SSE-NEXT: movdqa %xmm13, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[1,0],xmm13[0,0]
+; SSE-NEXT: shufps {{.*#+}} xmm8 = xmm8[2,0],xmm13[2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm8[0,2,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,3,2,3]
+; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[1,0,2,3,4,5,6,7]
+; SSE-NEXT: pand %xmm1, %xmm7
+; SSE-NEXT: por %xmm3, %xmm7
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Folded Reload
; SSE-NEXT: # xmm0 = xmm0[0,1],mem[0,2]
; SSE-NEXT: movaps %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm0[0,1,2,3,4,6,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm7, %xmm2
-; SSE-NEXT: pand %xmm14, %xmm3
-; SSE-NEXT: por %xmm3, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm0[0,1,2,3,4,6,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,2,0]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,6,5,4]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm3, %xmm2
+; SSE-NEXT: pand %xmm12, %xmm7
+; SSE-NEXT: por %xmm7, %xmm2
; SSE-NEXT: movdqa %xmm2, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Reload
; SSE-NEXT: movdqa %xmm11, %xmm3
; SSE-NEXT: psrldq {{.*#+}} xmm3 = xmm3[2,3,4,5,6,7,8,9,10,11,12,13,14,15],zero,zero
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm10[1,1,1,1]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm13[1,1,1,1]
; SSE-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm7[0]
; SSE-NEXT: movdqa %xmm1, %xmm7
; SSE-NEXT: pandn %xmm3, %xmm7
@@ -1971,21 +1973,21 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: pshuflw {{.*#+}} xmm6 = xmm6[1,0,2,3,4,5,6,7]
; SSE-NEXT: pand %xmm1, %xmm6
; SSE-NEXT: por %xmm7, %xmm6
-; SSE-NEXT: movaps (%rsp), %xmm2 # 16-byte Reload
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
; SSE-NEXT: shufps $132, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = xmm2[0,1],mem[0,2]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm2[0,1,2,3,4,6,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,2,0]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,6,5,4]
-; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: pandn %xmm7, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm6
+; SSE-NEXT: pand %xmm12, %xmm6
; SSE-NEXT: por %xmm6, %xmm0
-; SSE-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
-; SSE-NEXT: psrlq $48, %xmm13
+; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm10
; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
; SSE-NEXT: # xmm7 = mem[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm13[0]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm7 = xmm7[0],xmm10[0]
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: pandn %xmm7, %xmm6
; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,1,2,3,4,5,6,7]
@@ -1995,17 +1997,18 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
; SSE-NEXT: # xmm6 = mem[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: pandn %xmm6, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm4
+; SSE-NEXT: pand %xmm12, %xmm4
; SSE-NEXT: por %xmm4, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $48, %xmm9
-; SSE-NEXT: pshufd $250, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm9[0]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
+; SSE-NEXT: movdqa %xmm7, %xmm4
+; SSE-NEXT: psrlq $48, %xmm4
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm10 # 16-byte Reload
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm10[2,2,3,3]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm6 = xmm6[0],xmm4[0]
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pandn %xmm6, %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,1,2,3,4,5,6,7]
@@ -2015,16 +2018,16 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: por %xmm4, %xmm5
; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm4 # 16-byte Folded Reload
; SSE-NEXT: # xmm4 = mem[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm5
+; SSE-NEXT: pand %xmm12, %xmm5
; SSE-NEXT: por %xmm5, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: psrlq $48, %xmm10
+; SSE-NEXT: movdqa %xmm13, %xmm4
+; SSE-NEXT: psrlq $48, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm11[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm10[0]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm5 = xmm5[0],xmm4[0]
; SSE-NEXT: movdqa %xmm1, %xmm4
; SSE-NEXT: pandn %xmm5, %xmm4
; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,1,2,3,4,5,6,7]
@@ -2033,19 +2036,16 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: pand %xmm1, %xmm3
; SSE-NEXT: por %xmm4, %xmm3
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm2[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm14, %xmm0
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[0,1,0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm0
; SSE-NEXT: pandn %xmm4, %xmm0
-; SSE-NEXT: pand %xmm14, %xmm3
+; SSE-NEXT: pand %xmm12, %xmm3
; SSE-NEXT: por %xmm3, %xmm0
; SSE-NEXT: movdqa %xmm0, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
+; SSE-NEXT: psrlq $48, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movdqa %xmm5, %xmm3
-; SSE-NEXT: psrlq $48, %xmm3
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm13 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm13[2,2,3,3]
-; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm3[0]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm5[2,2,3,3]
+; SSE-NEXT: punpcklqdq {{.*#+}} xmm4 = xmm4[0],xmm9[0]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm8[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,1,3,3,4,5,6,7]
@@ -2054,51 +2054,47 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: por %xmm2, %xmm1
; SSE-NEXT: pshufhw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[0,1,2,3,7,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,2,0]
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,7,4,5]
-; SSE-NEXT: movdqa %xmm14, %xmm10
-; SSE-NEXT: pandn %xmm2, %xmm10
-; SSE-NEXT: pand %xmm14, %xmm1
-; SSE-NEXT: por %xmm1, %xmm10
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,2]
+; SSE-NEXT: movdqa %xmm12, %xmm13
+; SSE-NEXT: pandn %xmm2, %xmm13
+; SSE-NEXT: pand %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm13
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[1,1,1,1]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm6[2,3,2,3]
+; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: # xmm2 = mem[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm2 = xmm2[0],xmm1[0],xmm2[1],xmm1[1],xmm2[2],xmm1[2],xmm2[3],xmm1[3]
-; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm11 # 16-byte Folded Reload
-; SSE-NEXT: # xmm11 = mem[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm11[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Reload
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm7[1]
+; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm9 # 16-byte Folded Reload
+; SSE-NEXT: # xmm9 = mem[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm9[0,1,2,3,4,5,4,6]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm3 = xmm3[1],xmm6[1]
; SSE-NEXT: movss {{.*#+}} xmm3 = xmm2[0],xmm3[1,2,3]
; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa %xmm14, %xmm9
-; SSE-NEXT: pandn %xmm2, %xmm9
-; SSE-NEXT: andps %xmm14, %xmm3
-; SSE-NEXT: por %xmm3, %xmm9
+; SSE-NEXT: movdqa %xmm12, %xmm11
+; SSE-NEXT: pandn %xmm2, %xmm11
+; SSE-NEXT: andps %xmm12, %xmm3
+; SSE-NEXT: por %xmm3, %xmm11
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE-NEXT: pshufd $196, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[0,1,0,3]
-; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
-; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,4,6]
-; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
-; SSE-NEXT: # xmm2 = xmm2[1],mem[1]
+; SSE-NEXT: pshufd {{.*#+}} xmm8 = xmm10[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm8[0,1,2,3,4,5,4,6]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm7[1]
; SSE-NEXT: movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa %xmm14, %xmm8
-; SSE-NEXT: pandn %xmm3, %xmm8
-; SSE-NEXT: andps %xmm14, %xmm2
-; SSE-NEXT: por %xmm2, %xmm8
+; SSE-NEXT: movdqa %xmm12, %xmm10
+; SSE-NEXT: pandn %xmm3, %xmm10
+; SSE-NEXT: andps %xmm12, %xmm2
+; SSE-NEXT: por %xmm2, %xmm10
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
@@ -2115,140 +2111,141 @@ define void @vf32(<192 x i16>* %in.vec, <32 x i16>* %out.vec0, <32 x i16>* %out.
; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa %xmm14, %xmm4
-; SSE-NEXT: pandn %xmm3, %xmm4
-; SSE-NEXT: andps %xmm14, %xmm2
-; SSE-NEXT: por %xmm2, %xmm4
+; SSE-NEXT: movdqa %xmm12, %xmm7
+; SSE-NEXT: pandn %xmm3, %xmm7
+; SSE-NEXT: andps %xmm12, %xmm2
+; SSE-NEXT: por %xmm2, %xmm7
; SSE-NEXT: pshufd $85, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[1,1,1,1]
; SSE-NEXT: pshufd $238, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[2,3,2,3]
; SSE-NEXT: punpcklwd {{.*#+}} xmm3 = xmm3[0],xmm2[0],xmm3[1],xmm2[1],xmm3[2],xmm2[2],xmm3[3],xmm2[3]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm13[0,1,0,3]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm5[0,1,0,3]
; SSE-NEXT: movdqa %xmm1, {{[-0-9]+}}(%r{{[sb]}}p) # 16-byte Spill
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm1[0,1,2,3,4,5,4,6]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm5[1]
+; SSE-NEXT: punpckhqdq {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
+; SSE-NEXT: # xmm2 = xmm2[1],mem[1]
; SSE-NEXT: movss {{.*#+}} xmm2 = xmm3[0],xmm2[1,2,3]
; SSE-NEXT: pshuflw $232, {{[-0-9]+}}(%r{{[sb]}}p), %xmm3 # 16-byte Folded Reload
; SSE-NEXT: # xmm3 = mem[0,2,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[0,1,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm3[0,1,2,3,4,5,4,6]
-; SSE-NEXT: movdqa %xmm14, %xmm3
-; SSE-NEXT: pandn %xmm1, %xmm3
-; SSE-NEXT: andps %xmm14, %xmm2
-; SSE-NEXT: por %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm12, %xmm4
+; SSE-NEXT: pandn %xmm1, %xmm4
+; SSE-NEXT: andps %xmm12, %xmm2
+; SSE-NEXT: por %xmm2, %xmm4
; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: psrldq {{.*#+}} xmm6 = xmm6[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm6 = xmm6[0],xmm0[0],xmm6[1],xmm0[1],xmm6[2],xmm0[2],xmm6[3],xmm0[3]
-; SSE-NEXT: psrld $16, %xmm7
-; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm11[0,1,2,3,4,5,5,7]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm7[1]
-; SSE-NEXT: movss {{.*#+}} xmm1 = xmm6[0],xmm1[1,2,3]
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: movdqa %xmm1, %xmm2
+; SSE-NEXT: psrld $16, %xmm6
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm9[0,1,2,3,4,5,5,7]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm2[0],xmm1[1,2,3]
; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Folded Reload
; SSE-NEXT: # xmm2 = mem[3,1,2,3,4,5,6,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,4,5,7]
-; SSE-NEXT: movdqa %xmm14, %xmm2
-; SSE-NEXT: pandn %xmm6, %xmm2
-; SSE-NEXT: andps %xmm14, %xmm1
-; SSE-NEXT: por %xmm1, %xmm2
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: psrlq $48, %xmm5
+; SSE-NEXT: pshufhw {{.*#+}} xmm9 = xmm2[0,1,2,3,4,4,5,7]
+; SSE-NEXT: movdqa %xmm12, %xmm3
+; SSE-NEXT: pandn %xmm9, %xmm3
+; SSE-NEXT: andps %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm3
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: psrlq $48, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm5[0],xmm1[1],xmm5[1],xmm1[2],xmm5[2],xmm1[3],xmm5[3]
-; SSE-NEXT: movdqa %xmm1, %xmm5
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
-; SSE-NEXT: psrld $16, %xmm1
-; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,1,2,3,4,5,5,7]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm1[1]
-; SSE-NEXT: movss {{.*#+}} xmm6 = xmm5[0],xmm6[1,2,3]
-; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
-; SSE-NEXT: # xmm1 = mem[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm1[0,1,2,3,4,4,5,7]
-; SSE-NEXT: movdqa %xmm14, %xmm1
-; SSE-NEXT: pandn %xmm13, %xmm1
-; SSE-NEXT: andps %xmm14, %xmm6
-; SSE-NEXT: por %xmm6, %xmm1
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1],xmm1[2],xmm2[2],xmm1[3],xmm2[3]
+; SSE-NEXT: movdqa %xmm1, %xmm9
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: psrld $16, %xmm2
+; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm8[0,1,2,3,4,5,5,7]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm9[0],xmm1[1,2,3]
+; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7]
+; SSE-NEXT: movdqa %xmm12, %xmm9
+; SSE-NEXT: pandn %xmm6, %xmm9
+; SSE-NEXT: andps %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm9
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm0
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm0[0],xmm5[1],xmm0[1],xmm5[2],xmm0[2],xmm5[3],xmm0[3]
-; SSE-NEXT: movdqa %xmm5, %xmm7
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: psrld $16, %xmm5
-; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,1,2,3,4,5,5,7]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
-; SSE-NEXT: movss {{.*#+}} xmm6 = xmm7[0],xmm6[1,2,3]
-; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm7 # 16-byte Folded Reload
-; SSE-NEXT: # xmm7 = mem[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm13 = xmm7[0,1,2,3,4,4,5,7]
-; SSE-NEXT: movdqa %xmm14, %xmm7
-; SSE-NEXT: pandn %xmm13, %xmm7
-; SSE-NEXT: andps %xmm14, %xmm6
-; SSE-NEXT: por %xmm6, %xmm7
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: psrldq {{.*#+}} xmm1 = xmm1[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
+; SSE-NEXT: punpcklwd {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1],xmm1[2],xmm0[2],xmm1[3],xmm0[3]
+; SSE-NEXT: movdqa %xmm1, %xmm6
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm2 # 16-byte Reload
+; SSE-NEXT: psrld $16, %xmm2
+; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm2[1]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm6[0],xmm1[1,2,3]
+; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7]
+; SSE-NEXT: movdqa %xmm12, %xmm2
+; SSE-NEXT: pandn %xmm6, %xmm2
+; SSE-NEXT: andps %xmm12, %xmm1
+; SSE-NEXT: por %xmm1, %xmm2
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
; SSE-NEXT: psrlq $48, %xmm6
; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
; SSE-NEXT: psrldq {{.*#+}} xmm5 = xmm5[10,11,12,13,14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE-NEXT: punpcklwd {{.*#+}} xmm5 = xmm5[0],xmm6[0],xmm5[1],xmm6[1],xmm5[2],xmm6[2],xmm5[3],xmm6[3]
-; SSE-NEXT: movdqa %xmm5, %xmm11
-; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: psrld $16, %xmm5
-; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
-; SSE-NEXT: # xmm6 = mem[0,1,2,3,4,5,5,7]
-; SSE-NEXT: punpckhqdq {{.*#+}} xmm6 = xmm6[1],xmm5[1]
-; SSE-NEXT: movss {{.*#+}} xmm6 = xmm11[0],xmm6[1,2,3]
-; SSE-NEXT: andps %xmm14, %xmm6
-; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Folded Reload
-; SSE-NEXT: # xmm5 = mem[3,1,2,3,4,5,6,7]
-; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,0,3]
-; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,4,4,5,7]
-; SSE-NEXT: pandn %xmm5, %xmm14
-; SSE-NEXT: por %xmm6, %xmm14
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 48(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 16(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 32(%rsi)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, (%rsi)
-; SSE-NEXT: movaps %xmm12, 48(%rdx)
+; SSE-NEXT: movdqa {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Reload
+; SSE-NEXT: psrld $16, %xmm6
+; SSE-NEXT: pshufhw $212, {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Folded Reload
+; SSE-NEXT: # xmm1 = mem[0,1,2,3,4,5,5,7]
+; SSE-NEXT: punpckhqdq {{.*#+}} xmm1 = xmm1[1],xmm6[1]
+; SSE-NEXT: movss {{.*#+}} xmm1 = xmm5[0],xmm1[1,2,3]
+; SSE-NEXT: andps %xmm12, %xmm1
+; SSE-NEXT: pshuflw $231, {{[-0-9]+}}(%r{{[sb]}}p), %xmm6 # 16-byte Folded Reload
+; SSE-NEXT: # xmm6 = mem[3,1,2,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,0,3]
+; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm6[0,1,2,3,4,4,5,7]
+; SSE-NEXT: pandn %xmm6, %xmm12
+; SSE-NEXT: por %xmm1, %xmm12
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 48(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 16(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 32(%rsi)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, (%rsi)
+; SSE-NEXT: movaps %xmm14, 48(%rdx)
; SSE-NEXT: movaps %xmm15, 16(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 32(%rdx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, (%rdx)
-; SSE-NEXT: movaps (%rsp), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 16(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 48(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, 32(%rcx)
-; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm5 # 16-byte Reload
-; SSE-NEXT: movaps %xmm5, (%rcx)
-; SSE-NEXT: movdqa %xmm10, 48(%r8)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 32(%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, (%rdx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 16(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 48(%rcx)
+; SSE-NEXT: movaps (%rsp), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, 32(%rcx)
+; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm1 # 16-byte Reload
+; SSE-NEXT: movaps %xmm1, (%rcx)
+; SSE-NEXT: movdqa %xmm13, 48(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 16(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, 32(%r8)
; SSE-NEXT: movaps {{[-0-9]+}}(%r{{[sb]}}p), %xmm0 # 16-byte Reload
; SSE-NEXT: movaps %xmm0, (%r8)
-; SSE-NEXT: movdqa %xmm3, 48(%r9)
-; SSE-NEXT: movdqa %xmm4, 16(%r9)
-; SSE-NEXT: movdqa %xmm8, 32(%r9)
-; SSE-NEXT: movdqa %xmm9, (%r9)
+; SSE-NEXT: movdqa %xmm4, 48(%r9)
+; SSE-NEXT: movdqa %xmm7, 16(%r9)
+; SSE-NEXT: movdqa %xmm10, 32(%r9)
+; SSE-NEXT: movdqa %xmm11, (%r9)
; SSE-NEXT: movq {{[0-9]+}}(%rsp), %rax
-; SSE-NEXT: movdqa %xmm14, 48(%rax)
-; SSE-NEXT: movdqa %xmm7, 16(%rax)
-; SSE-NEXT: movdqa %xmm1, 32(%rax)
-; SSE-NEXT: movdqa %xmm2, (%rax)
+; SSE-NEXT: movdqa %xmm12, 48(%rax)
+; SSE-NEXT: movdqa %xmm2, 16(%rax)
+; SSE-NEXT: movdqa %xmm9, 32(%rax)
+; SSE-NEXT: movdqa %xmm3, (%rax)
; SSE-NEXT: addq $456, %rsp # imm = 0x1C8
; SSE-NEXT: retq
;
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
index 386fac87aa31..2d3325b6f132 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-load-i8-stride-6.ll
@@ -481,7 +481,7 @@ define void @load_i8_stride6_vf8(<48 x i8>* %in.vec, <8 x i8>* %out.vec0, <8 x i
; SSE-NEXT: por %xmm1, %xmm5
; SSE-NEXT: movdqa %xmm3, %xmm1
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8],xmm9[8],xmm1[9],xmm9[9],xmm1[10],xmm9[10],xmm1[11],xmm9[11],xmm1[12],xmm9[12],xmm1[13],xmm9[13],xmm1[14],xmm9[14],xmm1[15],xmm9[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm4 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: punpcklbw {{.*#+}} xmm3 = xmm3[0],xmm9[0],xmm3[1],xmm9[1],xmm3[2],xmm9[2],xmm3[3],xmm9[3],xmm3[4],xmm9[4],xmm3[5],xmm9[5],xmm3[6],xmm9[6],xmm3[7],xmm9[7]
@@ -894,8 +894,7 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm13[0],xmm7[1],xmm13[1],xmm7[2],xmm13[2],xmm7[3],xmm13[3],xmm7[4],xmm13[4],xmm7[5],xmm13[5],xmm7[6],xmm13[6],xmm7[7],xmm13[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm7[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,4,5,7,7]
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [65535,65535,65535,65535,0,65535,65535,0]
; SSE-NEXT: pand %xmm2, %xmm1
@@ -958,7 +957,7 @@ define void @load_i8_stride6_vf16(<96 x i8>* %in.vec, <16 x i8>* %out.vec0, <16
; SSE-NEXT: packuswb %xmm5, %xmm1
; SSE-NEXT: movdqa %xmm10, %xmm5
; SSE-NEXT: punpckhbw {{.*#+}} xmm5 = xmm5[8],xmm13[8],xmm5[9],xmm13[9],xmm5[10],xmm13[10],xmm5[11],xmm13[11],xmm5[12],xmm13[12],xmm5[13],xmm13[13],xmm5[14],xmm13[14],xmm5[15],xmm13[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm6 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: punpcklbw {{.*#+}} xmm10 = xmm10[0],xmm13[0],xmm10[1],xmm13[1],xmm10[2],xmm13[2],xmm10[3],xmm13[3],xmm10[4],xmm13[4],xmm10[5],xmm13[5],xmm10[6],xmm13[6],xmm10[7],xmm13[7]
@@ -1808,8 +1807,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: movdqa %xmm2, %xmm4
; SSE-NEXT: pandn %xmm0, %xmm4
; SSE-NEXT: punpcklbw {{.*#+}} xmm14 = xmm14[0],xmm10[0],xmm14[1],xmm10[1],xmm14[2],xmm10[2],xmm14[3],xmm10[3],xmm14[4],xmm10[4],xmm14[5],xmm10[5],xmm14[6],xmm10[6],xmm14[7],xmm10[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm14[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: por %xmm4, %xmm0
@@ -1853,8 +1851,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,1,2,1]
; SSE-NEXT: pshufhw {{.*#+}} xmm0 = xmm0[0,1,2,3,7,5,6,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm7 = xmm7[0],xmm10[0],xmm7[1],xmm10[1],xmm7[2],xmm10[2],xmm7[3],xmm10[3],xmm7[4],xmm10[4],xmm7[5],xmm10[5],xmm7[6],xmm10[6],xmm7[7],xmm10[7]
-; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[2,1,0,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[1,1,1,1,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm7[0,2,0,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,4,5,7,7]
; SSE-NEXT: pand %xmm2, %xmm3
; SSE-NEXT: pandn %xmm0, %xmm2
@@ -1958,7 +1955,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: pandn %xmm3, %xmm6
; SSE-NEXT: movdqa %xmm7, %xmm3
; SSE-NEXT: punpckhbw {{.*#+}} xmm3 = xmm3[8],xmm10[8],xmm3[9],xmm10[9],xmm3[10],xmm10[10],xmm3[11],xmm10[11],xmm3[12],xmm10[12],xmm3[13],xmm10[13],xmm3[14],xmm10[14],xmm3[15],xmm10[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm3[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm3 = xmm3[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm3[0,1,2,3,5,5,5,5]
; SSE-NEXT: movdqa {{.*#+}} xmm5 = [65535,0,65535,65535,0,65535,65535,65535]
; SSE-NEXT: movdqa %xmm5, %xmm4
@@ -2006,7 +2003,7 @@ define void @load_i8_stride6_vf32(<192 x i8>* %in.vec, <32 x i8>* %out.vec0, <32
; SSE-NEXT: packuswb %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm8, %xmm4
; SSE-NEXT: punpckhbw {{.*#+}} xmm4 = xmm4[8],xmm10[8],xmm4[9],xmm10[9],xmm4[10],xmm10[10],xmm4[11],xmm10[11],xmm4[12],xmm10[12],xmm4[13],xmm10[13],xmm4[14],xmm10[14],xmm4[15],xmm10[15]
-; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm4[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm4 = xmm4[1,1,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm4 = xmm4[0,1,2,3,5,5,5,5]
; SSE-NEXT: punpcklbw {{.*#+}} xmm8 = xmm8[0],xmm10[0],xmm8[1],xmm10[1],xmm8[2],xmm10[2],xmm8[3],xmm10[3],xmm8[4],xmm10[4],xmm8[5],xmm10[5],xmm8[6],xmm10[6],xmm8[7],xmm10[7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm8[0,1,2,3,7,5,6,7]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
index f639eae42eff..9d7b11119d5e 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i16-stride-5.ll
@@ -821,7 +821,7 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm12[3],xmm0[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm12[4],xmm7[4],xmm12[5],xmm7[5],xmm12[6],xmm7[6],xmm12[7],xmm7[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm9 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
; AVX1-NEXT: vandnps %ymm0, %ymm9, %ymm0
@@ -873,7 +873,7 @@ define void @vf16(<16 x i16>* %in.vecptr0, <16 x i16>* %in.vecptr1, <16 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm10[0],xmm11[0],xmm10[1],xmm11[1],xmm10[2],xmm11[2],xmm10[3],xmm11[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm1, %ymm1
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm2 = xmm4[4],xmm7[4],xmm4[5],xmm7[5],xmm4[6],xmm7[6],xmm4[7],xmm7[7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm2 = xmm2[0,1,2,3,4,5,7,6]
@@ -1686,7 +1686,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm2 = xmm9[0],xmm10[0],xmm9[1],xmm10[1],xmm9[2],xmm10[2],xmm9[3],xmm10[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm2 = xmm2[0,1,1,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm2, %ymm0, %ymm0
; AVX1-NEXT: vmovaps {{.*#+}} ymm12 = [65535,65535,0,0,65535,65535,65535,0,0,65535,65535,65535,0,0,65535,65535]
; AVX1-NEXT: vandnps %ymm0, %ymm12, %ymm2
@@ -1718,7 +1718,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm5 = xmm5[0,1,2],xmm8[3],xmm5[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm6 = xmm8[4],xmm0[4],xmm8[5],xmm0[5],xmm8[6],xmm0[6],xmm8[7],xmm0[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm6 = xmm6[0,1,3,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm6[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm6 = xmm6[0],zero,xmm6[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm6, %ymm5, %ymm5
; AVX1-NEXT: vandnps %ymm5, %ymm12, %ymm5
; AVX1-NEXT: vpshufd {{.*#+}} xmm6 = xmm3[1,1,2,2]
@@ -1794,7 +1794,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpblendw {{.*#+}} xmm0 = xmm0[0,1,2],xmm10[3],xmm0[4,5,6,7]
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm1 = xmm10[4],xmm4[4],xmm10[5],xmm4[5],xmm10[6],xmm4[6],xmm10[7],xmm4[7]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm1 = xmm1[0,1,3,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm1, %ymm0, %ymm1
; AVX1-NEXT: vmovdqa (%rsi), %xmm8
; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm8[3,3,3,3,4,5,6,7]
@@ -1846,7 +1846,7 @@ define void @vf32(<32 x i16>* %in.vecptr0, <32 x i16>* %in.vecptr1, <32 x i16>*
; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; AVX1-NEXT: vpunpcklwd {{.*#+}} xmm7 = xmm13[0],xmm11[0],xmm13[1],xmm11[1],xmm13[2],xmm11[2],xmm13[3],xmm11[3]
; AVX1-NEXT: vpshuflw {{.*#+}} xmm7 = xmm7[0,1,1,2,4,5,6,7]
-; AVX1-NEXT: vpshufd {{.*#+}} xmm7 = xmm7[0,1,1,3]
+; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm7 = xmm7[0],zero,xmm7[1],zero
; AVX1-NEXT: vinsertf128 $1, %xmm7, %ymm0, %ymm0
; AVX1-NEXT: vpunpckhwd {{.*#+}} xmm7 = xmm3[4],xmm4[4],xmm3[5],xmm4[5],xmm3[6],xmm4[6],xmm3[7],xmm4[7]
; AVX1-NEXT: vpshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,4,5,7,6]
diff --git a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
index f8390f581157..3996024a6a1f 100644
--- a/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
+++ b/llvm/test/CodeGen/X86/vector-interleaved-store-i8-stride-3.ll
@@ -233,22 +233,22 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm8[2,1,3,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm2 = xmm0[0,1,2,3,4,4,4,4]
; SSE-NEXT: movdqa {{.*#+}} xmm0 = [255,0,255,255,0,255,255,0,255,255,0,255,255,0,255,255]
-; SSE-NEXT: movdqa %xmm0, %xmm3
-; SSE-NEXT: pandn %xmm2, %xmm3
+; SSE-NEXT: movdqa %xmm0, %xmm4
+; SSE-NEXT: pandn %xmm2, %xmm4
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm1[2,1,2,3]
; SSE-NEXT: punpcklbw {{.*#+}} xmm2 = xmm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,3,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[1,2,2,3,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm6 = xmm2[0,1,2,3,4,5,5,6]
; SSE-NEXT: pand %xmm0, %xmm6
-; SSE-NEXT: por %xmm3, %xmm6
+; SSE-NEXT: por %xmm4, %xmm6
; SSE-NEXT: movdqa {{.*#+}} xmm2 = [255,255,0,255,255,0,255,255,0,255,255,0,255,255,0,255]
; SSE-NEXT: pand %xmm2, %xmm6
-; SSE-NEXT: pshuflw {{.*#+}} xmm3 = xmm5[3,3,3,3,4,5,6,7]
-; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm3[0,1,2,3,4,4,6,5]
-; SSE-NEXT: movdqa %xmm2, %xmm3
-; SSE-NEXT: pandn %xmm7, %xmm3
-; SSE-NEXT: por %xmm6, %xmm3
+; SSE-NEXT: pshuflw {{.*#+}} xmm4 = xmm5[3,3,3,3,4,5,6,7]
+; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm4[0,1,2,3,4,4,6,5]
+; SSE-NEXT: movdqa %xmm2, %xmm4
+; SSE-NEXT: pandn %xmm7, %xmm4
+; SSE-NEXT: por %xmm6, %xmm4
; SSE-NEXT: movdqa %xmm1, %xmm6
; SSE-NEXT: punpcklbw {{.*#+}} xmm6 = xmm6[0],xmm1[0],xmm6[1],xmm1[1],xmm6[2],xmm1[2],xmm6[3],xmm1[3],xmm6[4],xmm1[4],xmm6[5],xmm1[5],xmm6[6],xmm1[6],xmm6[7],xmm1[7]
; SSE-NEXT: pshufd {{.*#+}} xmm6 = xmm6[0,1,1,2]
@@ -264,9 +264,9 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm8[0,1,0,1]
; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,0,0,0,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,5,6,6]
-; SSE-NEXT: movdqa %xmm2, %xmm4
-; SSE-NEXT: pandn %xmm7, %xmm4
-; SSE-NEXT: por %xmm6, %xmm4
+; SSE-NEXT: movdqa %xmm2, %xmm3
+; SSE-NEXT: pandn %xmm7, %xmm3
+; SSE-NEXT: por %xmm6, %xmm3
; SSE-NEXT: pshufd {{.*#+}} xmm5 = xmm5[2,3,2,3]
; SSE-NEXT: pshuflw {{.*#+}} xmm5 = xmm5[1,1,2,2,4,5,6,7]
; SSE-NEXT: pshufhw {{.*#+}} xmm5 = xmm5[0,1,2,3,7,7,7,7]
@@ -278,14 +278,13 @@ define void @store_i8_stride3_vf16(<16 x i8>* %in.vecptr0, <16 x i8>* %in.vecptr
; SSE-NEXT: por %xmm6, %xmm0
; SSE-NEXT: pand %xmm2, %xmm0
; SSE-NEXT: punpckhbw {{.*#+}} xmm1 = xmm1[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15]
-; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm1 = xmm1[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm1[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm1 = xmm1[0,1,2,3,5,6,6,7]
; SSE-NEXT: pandn %xmm1, %xmm2
; SSE-NEXT: por %xmm0, %xmm2
; SSE-NEXT: movdqa %xmm2, 32(%rcx)
-; SSE-NEXT: movdqa %xmm4, (%rcx)
-; SSE-NEXT: movdqa %xmm3, 16(%rcx)
+; SSE-NEXT: movdqa %xmm3, (%rcx)
+; SSE-NEXT: movdqa %xmm4, 16(%rcx)
; SSE-NEXT: retq
;
; AVX-LABEL: store_i8_stride3_vf16:
@@ -403,8 +402,7 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
; SSE-NEXT: pand %xmm4, %xmm1
; SSE-NEXT: movdqa %xmm7, %xmm2
; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm7[8],xmm2[9],xmm7[9],xmm2[10],xmm7[10],xmm2[11],xmm7[11],xmm2[12],xmm7[12],xmm2[13],xmm7[13],xmm2[14],xmm7[14],xmm2[15],xmm7[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm2 = xmm2[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm2[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm3 = xmm2[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm2
; SSE-NEXT: pandn %xmm3, %xmm2
@@ -439,8 +437,7 @@ define void @store_i8_stride3_vf32(<32 x i8>* %in.vecptr0, <32 x i8>* %in.vecptr
; SSE-NEXT: pand %xmm4, %xmm3
; SSE-NEXT: movdqa %xmm13, %xmm7
; SSE-NEXT: punpckhbw {{.*#+}} xmm7 = xmm7[8],xmm13[8],xmm7[9],xmm13[9],xmm7[10],xmm13[10],xmm7[11],xmm13[11],xmm7[12],xmm13[12],xmm7[13],xmm13[13],xmm7[14],xmm13[14],xmm7[15],xmm13[15]
-; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[2,1,2,3]
-; SSE-NEXT: pshuflw {{.*#+}} xmm7 = xmm7[0,3,0,3,4,5,6,7]
+; SSE-NEXT: pshufd {{.*#+}} xmm7 = xmm7[1,2,2,3]
; SSE-NEXT: pshufhw {{.*#+}} xmm7 = xmm7[0,1,2,3,5,6,6,7]
; SSE-NEXT: movdqa %xmm4, %xmm5
; SSE-NEXT: pandn %xmm7, %xmm5
diff --git a/llvm/test/CodeGen/X86/vector-rotate-128.ll b/llvm/test/CodeGen/X86/vector-rotate-128.ll
index a10611210f99..16cce0adfcb2 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-128.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-128.ll
@@ -809,27 +809,15 @@ define <2 x i64> @splatvar_rotate_v2i64(<2 x i64> %a, <2 x i64> %b) nounwind {
}
define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
-; SSE2-LABEL: splatvar_rotate_v4i32:
-; SSE2: # %bb.0:
-; SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE2-NEXT: movd %xmm1, %eax
-; SSE2-NEXT: andl $31, %eax
-; SSE2-NEXT: movd %eax, %xmm1
-; SSE2-NEXT: psllq %xmm1, %xmm2
-; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE2-NEXT: psllq %xmm1, %xmm0
-; SSE2-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_rotate_v4i32:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; SSE41-NEXT: psllq %xmm1, %xmm2
-; SSE41-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
-; SSE41-NEXT: psllq %xmm1, %xmm0
-; SSE41-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_rotate_v4i32:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
+; SSE-NEXT: psllq %xmm1, %xmm2
+; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
+; SSE-NEXT: psllq %xmm1, %xmm0
+; SSE-NEXT: shufps {{.*#+}} xmm0 = xmm0[1,3],xmm2[1,3]
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_rotate_v4i32:
; AVX: # %bb.0:
@@ -900,10 +888,8 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v4i32:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm2 = xmm0[2,2,3,3]
-; X86-SSE2-NEXT: movd %xmm1, %eax
-; X86-SSE2-NEXT: andl $31, %eax
-; X86-SSE2-NEXT: movd %eax, %xmm1
; X86-SSE2-NEXT: psllq %xmm1, %xmm2
; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
; X86-SSE2-NEXT: psllq %xmm1, %xmm0
@@ -920,11 +906,9 @@ define <4 x i32> @splatvar_rotate_v4i32(<4 x i32> %a, <4 x i32> %b) nounwind {
define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
; SSE2-LABEL: splatvar_rotate_v8i16:
; SSE2: # %bb.0:
+; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
; SSE2-NEXT: movdqa %xmm0, %xmm2
; SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; SSE2-NEXT: pslld %xmm1, %xmm2
; SSE2-NEXT: psrad $16, %xmm2
; SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1036,11 +1020,9 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v8i16:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm0[4],xmm2[5],xmm0[5],xmm2[6],xmm0[6],xmm2[7],xmm0[7]
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0,1]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[14,15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: pslld %xmm1, %xmm2
; X86-SSE2-NEXT: psrad $16, %xmm2
; X86-SSE2-NEXT: punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
@@ -1057,33 +1039,18 @@ define <8 x i16> @splatvar_rotate_v8i16(<8 x i16> %a, <8 x i16> %b) nounwind {
}
define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
-; SSE2-LABEL: splatvar_rotate_v16i8:
-; SSE2: # %bb.0:
-; SSE2-NEXT: movdqa %xmm0, %xmm2
-; SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
-; SSE2-NEXT: psllw %xmm1, %xmm2
-; SSE2-NEXT: psrlw $8, %xmm2
-; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE2-NEXT: psllw %xmm1, %xmm0
-; SSE2-NEXT: psrlw $8, %xmm0
-; SSE2-NEXT: packuswb %xmm2, %xmm0
-; SSE2-NEXT: retq
-;
-; SSE41-LABEL: splatvar_rotate_v16i8:
-; SSE41: # %bb.0:
-; SSE41-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
-; SSE41-NEXT: movdqa %xmm0, %xmm2
-; SSE41-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; SSE41-NEXT: psllw %xmm1, %xmm2
-; SSE41-NEXT: psrlw $8, %xmm2
-; SSE41-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
-; SSE41-NEXT: psllw %xmm1, %xmm0
-; SSE41-NEXT: psrlw $8, %xmm0
-; SSE41-NEXT: packuswb %xmm2, %xmm0
-; SSE41-NEXT: retq
+; SSE-LABEL: splatvar_rotate_v16i8:
+; SSE: # %bb.0:
+; SSE-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1
+; SSE-NEXT: movdqa %xmm0, %xmm2
+; SSE-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
+; SSE-NEXT: psllw %xmm1, %xmm2
+; SSE-NEXT: psrlw $8, %xmm2
+; SSE-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
+; SSE-NEXT: psllw %xmm1, %xmm0
+; SSE-NEXT: psrlw $8, %xmm0
+; SSE-NEXT: packuswb %xmm2, %xmm0
+; SSE-NEXT: retq
;
; AVX-LABEL: splatvar_rotate_v16i8:
; AVX: # %bb.0:
@@ -1124,11 +1091,9 @@ define <16 x i8> @splatvar_rotate_v16i8(<16 x i8> %a, <16 x i8> %b) nounwind {
;
; X86-SSE2-LABEL: splatvar_rotate_v16i8:
; X86-SSE2: # %bb.0:
+; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
; X86-SSE2-NEXT: movdqa %xmm0, %xmm2
; X86-SSE2-NEXT: punpckhbw {{.*#+}} xmm2 = xmm2[8],xmm0[8],xmm2[9],xmm0[9],xmm2[10],xmm0[10],xmm2[11],xmm0[11],xmm2[12],xmm0[12],xmm2[13],xmm0[13],xmm2[14],xmm0[14],xmm2[15],xmm0[15]
-; X86-SSE2-NEXT: pand {{\.?LCPI[0-9]+_[0-9]+}}, %xmm1
-; X86-SSE2-NEXT: pslldq {{.*#+}} xmm1 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm1[0]
-; X86-SSE2-NEXT: psrldq {{.*#+}} xmm1 = xmm1[15],zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero
; X86-SSE2-NEXT: psllw %xmm1, %xmm2
; X86-SSE2-NEXT: psrlw $8, %xmm2
; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
diff --git a/llvm/test/CodeGen/X86/vector-rotate-256.ll b/llvm/test/CodeGen/X86/vector-rotate-256.ll
index 7682ea83ec79..3591a891f801 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-256.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-256.ll
@@ -671,10 +671,8 @@ define <8 x i32> @splatvar_rotate_v8i32(<8 x i32> %a, <8 x i32> %b) nounwind {
;
; AVX2-LABEL: splatvar_rotate_v8i32:
; AVX2: # %bb.0:
-; AVX2-NEXT: vpbroadcastd {{.*#+}} xmm2 = [31,31,31,31]
-; AVX2-NEXT: vpand %xmm2, %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX2-NEXT: vpshufd {{.*#+}} ymm2 = ymm0[2,2,3,3,6,6,7,7]
+; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX2-NEXT: vpsllq %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,0,1,1,4,4,5,5]
; AVX2-NEXT: vpsllq %xmm1, %ymm0, %ymm0
@@ -892,7 +890,6 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX2: # %bb.0:
; AVX2-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX2-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX2-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX2-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -905,7 +902,6 @@ define <32 x i8> @splatvar_rotate_v32i8(<32 x i8> %a, <32 x i8> %b) nounwind {
; AVX512: # %bb.0:
; AVX512-NEXT: vpunpckhbw {{.*#+}} ymm2 = ymm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
; AVX512-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512-NEXT: vpsllw %xmm1, %ymm2, %ymm2
; AVX512-NEXT: vpsrlw $8, %ymm2, %ymm2
; AVX512-NEXT: vpunpcklbw {{.*#+}} ymm0 = ymm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
diff --git a/llvm/test/CodeGen/X86/vector-rotate-512.ll b/llvm/test/CodeGen/X86/vector-rotate-512.ll
index b002125ec681..cdc4aa7b75b0 100644
--- a/llvm/test/CodeGen/X86/vector-rotate-512.ll
+++ b/llvm/test/CodeGen/X86/vector-rotate-512.ll
@@ -387,10 +387,9 @@ define <32 x i16> @splatvar_rotate_v32i16(<32 x i16> %a, <32 x i16> %b) nounwind
define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512F-LABEL: splatvar_rotate_v64i8:
; AVX512F: # %bb.0:
+; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512F-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512F-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512F-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512F-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512F-NEXT: vpsllw %xmm1, %ymm3, %ymm3
; AVX512F-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512F-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -409,10 +408,9 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
;
; AVX512VL-LABEL: splatvar_rotate_v64i8:
; AVX512VL: # %bb.0:
+; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
; AVX512VL-NEXT: vextracti64x4 $1, %zmm0, %ymm2
; AVX512VL-NEXT: vpunpckhbw {{.*#+}} ymm3 = ymm2[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31]
-; AVX512VL-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VL-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VL-NEXT: vpsllw %xmm1, %ymm3, %ymm3
; AVX512VL-NEXT: vpsrlw $8, %ymm3, %ymm3
; AVX512VL-NEXT: vpunpcklbw {{.*#+}} ymm2 = ymm2[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23]
@@ -433,7 +431,6 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512BW: # %bb.0:
; AVX512BW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512BW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512BW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512BW-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512BW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512BW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -446,7 +443,6 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VLBW: # %bb.0:
; AVX512VLBW-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLBW-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLBW-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLBW-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLBW-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -459,7 +455,6 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VBMI2: # %bb.0:
; AVX512VBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VBMI2-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
@@ -472,7 +467,6 @@ define <64 x i8> @splatvar_rotate_v64i8(<64 x i8> %a, <64 x i8> %b) nounwind {
; AVX512VLVBMI2: # %bb.0:
; AVX512VLVBMI2-NEXT: vpunpckhbw {{.*#+}} zmm2 = zmm0[8,8,9,9,10,10,11,11,12,12,13,13,14,14,15,15,24,24,25,25,26,26,27,27,28,28,29,29,30,30,31,31,40,40,41,41,42,42,43,43,44,44,45,45,46,46,47,47,56,56,57,57,58,58,59,59,60,60,61,61,62,62,63,63]
; AVX512VLVBMI2-NEXT: vpand {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm1, %xmm1
-; AVX512VLVBMI2-NEXT: vpmovzxbq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,zero,zero,zero,zero,xmm1[1],zero,zero,zero,zero,zero,zero,zero
; AVX512VLVBMI2-NEXT: vpsllw %xmm1, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpsrlw $8, %zmm2, %zmm2
; AVX512VLVBMI2-NEXT: vpunpcklbw {{.*#+}} zmm0 = zmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,16,16,17,17,18,18,19,19,20,20,21,21,22,22,23,23,32,32,33,33,34,34,35,35,36,36,37,37,38,38,39,39,48,48,49,49,50,50,51,51,52,52,53,53,54,54,55,55]
diff --git a/llvm/test/DebugInfo/COFF/compiler-version-overflow.ll b/llvm/test/DebugInfo/COFF/compiler-version-overflow.ll
index 538ba699ae6f..3374bf4519ce 100644
--- a/llvm/test/DebugInfo/COFF/compiler-version-overflow.ll
+++ b/llvm/test/DebugInfo/COFF/compiler-version-overflow.ll
@@ -3,7 +3,7 @@
; CHECK: {{.*}} | S_COMPILE3 [size = {{.*}}]
; CHECK-NEXT: machine = intel pentium 3, Ver = clang version 999999999999.9999999999.9999999.99999999 , language = c++
-; CHECK-NEXT: frontend = 65535.65535.65535.65535, backend = 15000.0.0.0
+; CHECK-NEXT: frontend = 65535.65535.65535.65535, backend = {{[0-9]+}}.0.0.0
; CHECK-NEXT: flags = none
diff --git a/llvm/test/Instrumentation/HWAddressSanitizer/alloca-uninteresting.ll b/llvm/test/Instrumentation/HWAddressSanitizer/alloca-uninteresting.ll
new file mode 100644
index 000000000000..053ca6f8c931
--- /dev/null
+++ b/llvm/test/Instrumentation/HWAddressSanitizer/alloca-uninteresting.ll
@@ -0,0 +1,48 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; Test that uninteresting allocas (e.g. dynamic, because we do not know
+; their size) are not instrumented.
+;
+; RUN: opt < %s -passes=hwasan -S | FileCheck %s --check-prefixes=CHECK
+
+target datalayout = "e-m:e-i8:8:32-i16:16:32-i64:64-i128:128-n32:64-S128"
+target triple = "aarch64--linux-android10000"
+
+declare void @use32(i32*)
+
+define void @test_dyn_alloca(i32 %n) sanitize_hwaddress !dbg !15 {
+; CHECK-LABEL: @test_dyn_alloca(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[X:%.*]] = alloca i32, i32 [[N:%.*]], align 4
+; CHECK-NEXT: call void @llvm.dbg.value(metadata !DIArgList(i32* [[X]], i32* [[X]]), metadata [[META10:![0-9]+]], metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_deref)), !dbg [[DBG12:![0-9]+]]
+; CHECK-NEXT: call void @use32(i32* nonnull [[X]]), !dbg [[DBG13:![0-9]+]]
+; CHECK-NEXT: ret void, !dbg [[DBG14:![0-9]+]]
+;
+
+entry:
+ %x = alloca i32, i32 %n, align 4
+ call void @llvm.dbg.value(metadata !DIArgList(i32* %x, i32* %x), metadata !22, metadata !DIExpression(DW_OP_LLVM_arg, 0, DW_OP_LLVM_arg, 1, DW_OP_plus, DW_OP_deref)), !dbg !21
+ call void @use32(i32* nonnull %x), !dbg !23
+ ret void, !dbg !24
+}
+
+declare void @llvm.dbg.value(metadata, metadata, metadata)
+
+!llvm.dbg.cu = !{!0}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!14}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 13.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "alloca.cpp", directory: "/")
+!2 = !{}
+!3 = !{i32 7, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!14 = !{!"clang version 13.0.0"}
+!15 = distinct !DISubprogram(name: "test_alloca", linkageName: "_Z11test_allocav", scope: !1, file: !1, line: 4, type: !16, scopeLine: 4, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!16 = !DISubroutineType(types: !17)
+!17 = !{null}
+!19 = !DIDerivedType(tag: DW_TAG_pointer_type, baseType: !20, size: 64)
+!20 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!21 = !DILocation(line: 0, scope: !15)
+!22 = !DILocalVariable(name: "x", scope: !15, file: !1, line: 5, type: !20)
+!23 = !DILocation(line: 7, column: 5, scope: !15)
+!24 = !DILocation(line: 8, column: 1, scope: !15)
diff --git a/llvm/test/MC/Disassembler/M68k/arithmetic.txt b/llvm/test/MC/Disassembler/M68k/arithmetic.txt
index 670d4297dab8..5e80006183e4 100644
--- a/llvm/test/MC/Disassembler/M68k/arithmetic.txt
+++ b/llvm/test/MC/Disassembler/M68k/arithmetic.txt
@@ -1,4 +1,7 @@
# RUN: llvm-mc -disassemble -triple m68k %s | FileCheck %s
+# Disable this particular test until migration to the new code emitter is
+# finished.
+# XFAIL: *
# CHECK: adda.l %a0, %a1
0xd3 0xc8
diff --git a/llvm/test/TableGen/VarLenEncoder.td b/llvm/test/TableGen/VarLenEncoder.td
new file mode 100644
index 000000000000..a1ea389ffad2
--- /dev/null
+++ b/llvm/test/TableGen/VarLenEncoder.td
@@ -0,0 +1,93 @@
+// RUN: llvm-tblgen -gen-emitter -I %p/../../include %s | FileCheck %s
+
+// Check if VarLenCodeEmitterGen works correctly.
+
+include "llvm/Target/Target.td"
+
+def ArchInstrInfo : InstrInfo { }
+
+def Arch : Target {
+ let InstructionSet = ArchInstrInfo;
+}
+
+def Reg : Register<"reg">;
+
+def RegClass : RegisterClass<"foo", [i64], 0, (add Reg)>;
+
+def GR64 : RegisterOperand<RegClass>;
+
+class MyMemOperand<dag sub_ops> : Operand<iPTR> {
+ let MIOperandInfo = sub_ops;
+ dag Base;
+ dag Extension;
+}
+
+class MyVarInst<MyMemOperand memory_op> : Instruction {
+ dag Inst;
+
+ let OutOperandList = (outs GR64:$dst);
+ let InOperandList = (ins memory_op:$src);
+
+ // Testing `ascend` and `descend`
+ let Inst = (ascend
+ (descend 0b10110111, memory_op.Base),
+ memory_op.Extension,
+ // Testing operand referencing.
+ (operand "$dst", 4),
+ // Testing operand referencing with a certain bit range.
+ (slice "$dst", 3, 1)
+ );
+}
+
+class MemOp16<string op_name> : MyMemOperand<(ops GR64:$reg, i16imm:$offset)> {
+ // Testing sub-operand referencing.
+ let Base = (operand "$"#op_name#".reg", 8);
+ let Extension = (operand "$"#op_name#".offset", 16);
+}
+
+class MemOp32<string op_name> : MyMemOperand<(ops GR64:$reg, i32imm:$offset)> {
+ let Base = (operand "$"#op_name#".reg", 8);
+ // Testing variable-length instruction encoding.
+ let Extension = (operand "$"#op_name#".offset", 32);
+}
+
+def FOO16 : MyVarInst<MemOp16<"src">>;
+def FOO32 : MyVarInst<MemOp32<"src">>;
+
+// The fixed bits part
+// CHECK: {/*NumBits*/39,
+// CHECK-SAME: // FOO16
+// CHECK: {/*NumBits*/55,
+// CHECK-SAME: // FOO32
+// CHECK: UINT64_C(46848), // FOO16
+// CHECK: UINT64_C(46848), // FOO32
+
+// CHECK-LABEL: case ::FOO16: {
+// CHECK: Scratch = Scratch.zextOrSelf(39);
+// src.reg
+// CHECK: getMachineOpValue(MI, MI.getOperand(1), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
+// src.offset
+// CHECK: getMachineOpValue(MI, MI.getOperand(2), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(16, 0), 16);
+// 1st dst
+// CHECK: getMachineOpValue(MI, MI.getOperand(0), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(4, 0), 32);
+// 2nd dst
+// CHECK: getMachineOpValue(MI, MI.getOperand(0), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(3, 1), 36);
+
+// CHECK-LABEL: case ::FOO32: {
+// CHECK: Scratch = Scratch.zextOrSelf(55);
+// src.reg
+// CHECK: getMachineOpValue(MI, MI.getOperand(1), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(8, 0), 0);
+// src.offset
+// CHECK: getMachineOpValue(MI, MI.getOperand(2), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(32, 0), 16);
+// 1st dst
+// CHECK: getMachineOpValue(MI, MI.getOperand(0), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(4, 0), 48);
+// 2nd dst
+// CHECK: getMachineOpValue(MI, MI.getOperand(0), Scratch, Fixups, STI);
+// CHECK: Inst.insertBits(Scratch.extractBits(3, 1), 52);
diff --git a/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll b/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll
index 5fd7bad1bdbb..9e745a8f67b6 100644
--- a/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll
+++ b/llvm/test/Transforms/ConstraintElimination/wrapping-math.ll
@@ -22,7 +22,7 @@ define i1 @wrapping_add_known_1(i8 %a) {
; CHECK: then:
; CHECK-NEXT: [[SUB_1:%.*]] = add i8 [[A]], -1
; CHECK-NEXT: [[C_1:%.*]] = icmp eq i8 [[SUB_1]], 0
-; CHECK-NEXT: ret i1 [[C_1]]
+; CHECK-NEXT: ret i1 true
; CHECK: else:
; CHECK-NEXT: [[SUB_2:%.*]] = add i8 [[A]], -1
; CHECK-NEXT: [[C_2:%.*]] = icmp eq i8 [[SUB_2]], 0
@@ -71,3 +71,203 @@ else:
%c.2 = icmp eq i8 %sub.2, 0
ret i1 %c.2
}
+
+; Test from https://github.com/llvm/llvm-project/issues/48253.
+define i1 @test_48253_eq_ne(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_48253_eq_ne(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_1:%.*]] = icmp ne i8 [[A:%.*]], [[B:%.*]]
+; CHECK-NEXT: [[CMP_2:%.*]] = icmp eq i8 [[B]], 0
+; CHECK-NEXT: [[OR:%.*]] = or i1 [[CMP_1]], [[CMP_2]]
+; CHECK-NEXT: br i1 [[OR]], label [[EXIT_1:%.*]], label [[IF_END:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[SUB_1:%.*]] = add i8 [[B]], -1
+; CHECK-NEXT: [[T_1:%.*]] = icmp ult i8 [[SUB_1]], [[A]]
+; CHECK-NEXT: [[SUB_2:%.*]] = add i8 [[B]], -2
+; CHECK-NEXT: [[C_2:%.*]] = icmp ult i8 [[SUB_2]], [[A]]
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i1 true, [[C_2]]
+; CHECK-NEXT: ret i1 [[XOR_1]]
+; CHECK: exit.1:
+; CHECK-NEXT: [[SUB_3:%.*]] = add i8 [[B]], -1
+; CHECK-NEXT: [[C_3:%.*]] = icmp ult i8 [[SUB_3]], [[A]]
+; CHECK-NEXT: [[SUB_4:%.*]] = add i8 [[B]], -2
+; CHECK-NEXT: [[C_4:%.*]] = icmp ult i8 [[SUB_4]], [[A]]
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i1 [[C_3]], [[C_4]]
+; CHECK-NEXT: ret i1 [[XOR_2]]
+;
+entry:
+ %cmp.1 = icmp ne i8 %a, %b
+ %cmp.2 = icmp eq i8 %b, 0
+ %or = or i1 %cmp.1, %cmp.2
+ br i1 %or, label %exit.1, label %if.end
+
+if.end:
+ %sub.1 = add i8 %b, -1
+ %t.1 = icmp ult i8 %sub.1, %a
+ %sub.2 = add i8 %b, -2
+ %c.2 = icmp ult i8 %sub.2, %a
+ %xor.1 = xor i1 %t.1, %c.2
+ ret i1 %xor.1
+
+exit.1:
+ %sub.3 = add i8 %b, -1
+ %c.3 = icmp ult i8 %sub.3, %a
+ %sub.4 = add i8 %b, -2
+ %c.4 = icmp ult i8 %sub.4, %a
+ %xor.2 = xor i1 %c.3, %c.4
+ ret i1 %xor.2
+}
+
+define i1 @test_ult(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_ult(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_1:%.*]] = icmp uge i8 [[A:%.*]], 20
+; CHECK-NEXT: [[CMP_2:%.*]] = icmp ult i8 [[A]], [[B:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[CMP_1]], [[CMP_2]]
+; CHECK-NEXT: [[SUB_1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[SUB_2:%.*]] = add i8 [[A]], -2
+; CHECK-NEXT: [[SUB_3:%.*]] = add i8 [[A]], -20
+; CHECK-NEXT: [[SUB_4:%.*]] = add i8 [[A]], 21
+; CHECK-NEXT: [[ADD_1:%.*]] = add i8 [[A]], 1
+; CHECK-NEXT: br i1 [[AND]], label [[IF_END:%.*]], label [[EXIT_1:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[T_1:%.*]] = icmp ult i8 [[SUB_1]], [[B]]
+; CHECK-NEXT: [[T_2:%.*]] = icmp ult i8 [[SUB_2]], [[B]]
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i1 true, true
+; CHECK-NEXT: [[T_3:%.*]] = icmp ult i8 [[SUB_3]], [[B]]
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i1 [[XOR_1]], true
+; CHECK-NEXT: [[C_1:%.*]] = icmp ult i8 [[SUB_4]], [[B]]
+; CHECK-NEXT: [[XOR_3:%.*]] = xor i1 [[XOR_2]], [[C_1]]
+; CHECK-NEXT: [[C_2:%.*]] = icmp ult i8 [[ADD_1]], [[B]]
+; CHECK-NEXT: [[XOR_4:%.*]] = xor i1 [[XOR_3]], [[C_2]]
+; CHECK-NEXT: ret i1 [[XOR_4]]
+; CHECK: exit.1:
+; CHECK-NEXT: [[C_3:%.*]] = icmp ult i8 [[SUB_1]], [[B]]
+; CHECK-NEXT: [[C_4:%.*]] = icmp ult i8 [[SUB_2]], [[B]]
+; CHECK-NEXT: [[XOR_5:%.*]] = xor i1 [[C_3]], [[C_4]]
+; CHECK-NEXT: [[C_5:%.*]] = icmp ult i8 [[SUB_3]], [[B]]
+; CHECK-NEXT: [[XOR_6:%.*]] = xor i1 [[XOR_5]], [[C_5]]
+; CHECK-NEXT: [[C_6:%.*]] = icmp ult i8 [[SUB_4]], [[B]]
+; CHECK-NEXT: [[XOR_7:%.*]] = xor i1 [[XOR_6]], [[C_6]]
+; CHECK-NEXT: [[C_7:%.*]] = icmp ult i8 [[ADD_1]], [[B]]
+; CHECK-NEXT: [[XOR_8:%.*]] = xor i1 [[XOR_7]], [[C_7]]
+; CHECK-NEXT: ret i1 [[XOR_8]]
+;
+entry:
+ %cmp.1 = icmp uge i8 %a, 20
+ %cmp.2 = icmp ult i8 %a, %b
+ %and = and i1 %cmp.1, %cmp.2
+ %sub.1 = add i8 %a, -1
+ %sub.2 = add i8 %a, -2
+ %sub.3 = add i8 %a, -20
+ %sub.4 = add i8 %a, 21
+ %add.1 = add i8 %a, 1
+ br i1 %and, label %if.end, label %exit.1
+
+if.end:
+ %t.1 = icmp ult i8 %sub.1, %b
+ %t.2 = icmp ult i8 %sub.2, %b
+ %xor.1 = xor i1 %t.1, %t.2
+
+ %t.3 = icmp ult i8 %sub.3, %b
+ %xor.2 = xor i1 %xor.1, %t.3
+
+ %c.1 = icmp ult i8 %sub.4, %b
+ %xor.3 = xor i1 %xor.2, %c.1
+
+ %c.2 = icmp ult i8 %add.1, %b
+ %xor.4 = xor i1 %xor.3, %c.2
+ ret i1 %xor.4
+
+exit.1:
+ %c.3 = icmp ult i8 %sub.1, %b
+ %c.4 = icmp ult i8 %sub.2, %b
+ %xor.5 = xor i1 %c.3, %c.4
+
+ %c.5 = icmp ult i8 %sub.3, %b
+ %xor.6 = xor i1 %xor.5, %c.5
+
+ %c.6 = icmp ult i8 %sub.4, %b
+ %xor.7 = xor i1 %xor.6, %c.6
+
+ %c.7 = icmp ult i8 %add.1, %b
+ %xor.8 = xor i1 %xor.7, %c.7
+ ret i1 %xor.8
+}
+
+define i1 @test_slt(i8 %a, i8 %b) {
+; CHECK-LABEL: @test_slt(
+; CHECK-NEXT: entry:
+; CHECK-NEXT: [[CMP_1:%.*]] = icmp sge i8 [[A:%.*]], 20
+; CHECK-NEXT: [[CMP_2:%.*]] = icmp slt i8 [[A]], [[B:%.*]]
+; CHECK-NEXT: [[AND:%.*]] = and i1 [[CMP_1]], [[CMP_2]]
+; CHECK-NEXT: [[SUB_1:%.*]] = add i8 [[A]], -1
+; CHECK-NEXT: [[SUB_2:%.*]] = add i8 [[A]], -2
+; CHECK-NEXT: [[SUB_3:%.*]] = add i8 [[A]], -20
+; CHECK-NEXT: [[SUB_4:%.*]] = add i8 [[A]], 21
+; CHECK-NEXT: [[ADD_1:%.*]] = add i8 [[A]], 1
+; CHECK-NEXT: br i1 [[AND]], label [[IF_END:%.*]], label [[EXIT_1:%.*]]
+; CHECK: if.end:
+; CHECK-NEXT: [[T_1:%.*]] = icmp slt i8 [[SUB_1]], [[B]]
+; CHECK-NEXT: [[T_2:%.*]] = icmp slt i8 [[SUB_2]], [[B]]
+; CHECK-NEXT: [[XOR_1:%.*]] = xor i1 [[T_1]], [[T_2]]
+; CHECK-NEXT: [[T_3:%.*]] = icmp slt i8 [[SUB_3]], [[B]]
+; CHECK-NEXT: [[XOR_2:%.*]] = xor i1 [[XOR_1]], [[T_3]]
+; CHECK-NEXT: [[C_1:%.*]] = icmp slt i8 [[SUB_4]], [[B]]
+; CHECK-NEXT: [[XOR_3:%.*]] = xor i1 [[XOR_2]], [[C_1]]
+; CHECK-NEXT: [[C_2:%.*]] = icmp slt i8 [[ADD_1]], [[B]]
+; CHECK-NEXT: [[XOR_4:%.*]] = xor i1 [[XOR_3]], [[C_2]]
+; CHECK-NEXT: ret i1 [[XOR_4]]
+; CHECK: exit.1:
+; CHECK-NEXT: [[C_3:%.*]] = icmp slt i8 [[SUB_1]], [[B]]
+; CHECK-NEXT: [[C_4:%.*]] = icmp slt i8 [[SUB_2]], [[B]]
+; CHECK-NEXT: [[XOR_5:%.*]] = xor i1 [[C_3]], [[C_4]]
+; CHECK-NEXT: [[C_5:%.*]] = icmp slt i8 [[SUB_3]], [[B]]
+; CHECK-NEXT: [[XOR_6:%.*]] = xor i1 [[XOR_5]], [[C_5]]
+; CHECK-NEXT: [[C_6:%.*]] = icmp slt i8 [[SUB_4]], [[B]]
+; CHECK-NEXT: [[XOR_7:%.*]] = xor i1 [[XOR_6]], [[C_6]]
+; CHECK-NEXT: [[C_7:%.*]] = icmp slt i8 [[ADD_1]], [[B]]
+; CHECK-NEXT: [[XOR_8:%.*]] = xor i1 [[XOR_7]], [[C_7]]
+; CHECK-NEXT: ret i1 [[XOR_8]]
+;
+entry:
+ %cmp.1 = icmp sge i8 %a, 20
+ %cmp.2 = icmp slt i8 %a, %b
+ %and = and i1 %cmp.1, %cmp.2
+ %sub.1 = add i8 %a, -1
+ %sub.2 = add i8 %a, -2
+ %sub.3 = add i8 %a, -20
+ %sub.4 = add i8 %a, 21
+ %add.1 = add i8 %a, 1
+ br i1 %and, label %if.end, label %exit.1
+
+if.end:
+ %t.1 = icmp slt i8 %sub.1, %b
+ %t.2 = icmp slt i8 %sub.2, %b
+ %xor.1 = xor i1 %t.1, %t.2
+
+ %t.3 = icmp slt i8 %sub.3, %b
+ %xor.2 = xor i1 %xor.1, %t.3
+
+ %c.1 = icmp slt i8 %sub.4, %b
+ %xor.3 = xor i1 %xor.2, %c.1
+
+ %c.2 = icmp slt i8 %add.1, %b
+ %xor.4 = xor i1 %xor.3, %c.2
+ ret i1 %xor.4
+
+exit.1:
+ %c.3 = icmp slt i8 %sub.1, %b
+ %c.4 = icmp slt i8 %sub.2, %b
+ %xor.5 = xor i1 %c.3, %c.4
+
+ %c.5 = icmp slt i8 %sub.3, %b
+ %xor.6 = xor i1 %xor.5, %c.5
+
+ %c.6 = icmp slt i8 %sub.4, %b
+ %xor.7 = xor i1 %xor.6, %c.6
+
+ %c.7 = icmp slt i8 %add.1, %b
+ %xor.8 = xor i1 %xor.7, %c.7
+ ret i1 %xor.8
+}
diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr-swift.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr-swift.ll
new file mode 100644
index 000000000000..38d50696f417
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr-swift.ll
@@ -0,0 +1,442 @@
+; Tests whether we properly set up llvm.dbg.addr for Swift.
+;
+; Since we do not have any guarantees around the usage of llvm.dbg.addr, we
+; cannot propagate it into funclets the way we do llvm.dbg.declare. But if users
+; create the debug_value for us, make sure that we propagate llvm.dbg.addr into
+; the beginning coroutine and all other funclets.
+
+; RUN: opt %s -passes='function(coro-early),cgscc(coro-split,simplifycfg)' -S | FileCheck %s
+
+; CHECK-LABEL: define swifttailcc void @"$s10async_args14withGenericArgyyxnYalF"(%swift.context* swiftasync %0, %swift.opaque* noalias %1, %swift.type* %T){{.*}} {
+; CHECK: call void @llvm.dbg.declare(
+; CHECK: llvm.dbg.addr
+; CHECK-NOT: llvm.dbg.value
+; CHECK-NOT: llvm.dbg.addr
+; CHECK-NOT: llvm.dbg.declare
+; CHECK: musttail call swifttailcc void @swift_task_switch(%swift.context* swiftasync %19, i8* bitcast (void (i8*)* @"$s10async_args14withGenericArgyyxnYalFTY0_" to i8*), i64 0, i64 0)
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+; CHECK-LABEL: define internal swifttailcc void @"$s10async_args14withGenericArgyyxnYalFTY0_"(i8* swiftasync %0)
+; CHECK: entryresume.0
+; CHECK-NEXT: %.debug
+; CHECK-NEXT: call void @llvm.dbg.declare(
+; CHECK: llvm.dbg.addr
+; CHECK: musttail call swifttailcc void @"$s10async_args10forceSplityyYaF"(%swift.context* swiftasync
+; CHECK-NEXT: ret void
+; CHECK-NEXT: }
+
+; CHECK: define internal swifttailcc void @"$s10async_args14withGenericArgyyxnYalFTQ1_"(i8* swiftasync %0)
+; CHECK: llvm.dbg.declare
+; CHECK: llvm.dbg.addr
+; CHECK: llvm.dbg.value(metadata %swift.opaque** undef,
+; CHECK: ret void
+; CHECK-NEXT: }
+
+; ModuleID = 'async_args.ll'
+source_filename = "async_args.ll"
+target datalayout = "e-m:o-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:128-n8:16:32:64-S128"
+target triple = "x86_64-apple-macosx11.0.0"
+
+%swift.async_func_pointer = type <{ i32, i32 }>
+%swift.opaque = type opaque
+%swift.type = type { i64 }
+%swift.context = type { %swift.context*, void (%swift.context*)*, i64 }
+%swift.vwtable = type { i8*, i8*, i8*, i8*, i8*, i8*, i8*, i8*, i64, i64, i32, i32 }
+
+@"$s10async_args10forceSplityyYaFTu" = global %swift.async_func_pointer <{ i32 trunc (i64 sub (i64 ptrtoint (void (%swift.context*)* @"$s10async_args10forceSplityyYaF" to i64), i64 ptrtoint (%swift.async_func_pointer* @"$s10async_args10forceSplityyYaFTu" to i64)) to i32), i32 20 }>, align 8
+@"$s10async_args14withGenericArgyyxnYalFTu" = global %swift.async_func_pointer <{ i32 trunc (i64 sub (i64 ptrtoint (void (%swift.context*, %swift.opaque*, %swift.type*)* @"$s10async_args14withGenericArgyyxnYalF" to i64), i64 ptrtoint (%swift.async_func_pointer* @"$s10async_args14withGenericArgyyxnYalFTu" to i64)) to i32), i32 20 }>, align 8
+@"_swift_FORCE_LOAD_$_swiftCompatibilityConcurrency_$_async_args" = weak_odr hidden constant void ()* @"_swift_FORCE_LOAD_$_swiftCompatibilityConcurrency"
+@__swift_reflection_version = linkonce_odr hidden constant i16 3
+@swift_async_extendedFramePointerFlags = extern_weak global i8*
+@_swift_async_extendedFramePointerFlagsUser = linkonce_odr hidden global i8** @swift_async_extendedFramePointerFlags
+@llvm.used = appending global [10 x i8*] [i8* bitcast (void (%swift.opaque*, %swift.type*)* @"$s10async_args3useyyxlF" to i8*), i8* bitcast (void (%swift.opaque*, %swift.type*)* @"$s10async_args4use2yyxlF" to i8*), i8* bitcast (void (%swift.context*)* @"$s10async_args10forceSplityyYaF" to i8*), i8* bitcast (%swift.async_func_pointer* @"$s10async_args10forceSplityyYaFTu" to i8*), i8* bitcast (void (%swift.opaque*, %swift.type*)* @"$s10async_args4use3yyxlF" to i8*), i8* bitcast (void (%swift.context*, %swift.opaque*, %swift.type*)* @"$s10async_args14withGenericArgyyxnYalF" to i8*), i8* bitcast (%swift.async_func_pointer* @"$s10async_args14withGenericArgyyxnYalFTu" to i8*), i8* bitcast (void ()** @"_swift_FORCE_LOAD_$_swiftCompatibilityConcurrency_$_async_args" to i8*), i8* bitcast (i16* @__swift_reflection_version to i8*), i8* bitcast (i8*** @_swift_async_extendedFramePointerFlagsUser to i8*)], section "llvm.metadata"
+
+define hidden swiftcc i1 @"$s10async_args7booleanSbvg"() #0 !dbg !31 {
+entry:
+ ret i1 false, !dbg !37
+}
+
+define swiftcc void @"$s10async_args3useyyxlF"(%swift.opaque* noalias nocapture %0, %swift.type* %T) #0 !dbg !39 {
+entry:
+ %T1 = alloca %swift.type*, align 8
+ %t.debug = alloca %swift.opaque*, align 8
+ %1 = bitcast %swift.opaque** %t.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %1, i8 0, i64 8, i1 false)
+ store %swift.type* %T, %swift.type** %T1, align 8
+ call void @llvm.dbg.declare(metadata %swift.type** %T1, metadata !45, metadata !DIExpression()), !dbg !52
+ store %swift.opaque* %0, %swift.opaque** %t.debug, align 8, !dbg !52
+ call void @llvm.dbg.addr(metadata %swift.opaque** %t.debug, metadata !50, metadata !DIExpression(DW_OP_deref)), !dbg !53
+ ret void, !dbg !54
+}
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #1
+
+; Function Attrs: argmemonly nofree nounwind willreturn writeonly
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #2
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare void @llvm.dbg.addr(metadata, metadata, metadata) #1
+
+declare void @llvm.dbg.value(metadata, metadata, metadata) #1
+
+define swiftcc void @"$s10async_args4use2yyxlF"(%swift.opaque* noalias nocapture %0, %swift.type* %T) #0 !dbg !56 {
+entry:
+ %T1 = alloca %swift.type*, align 8
+ %t.debug = alloca %swift.opaque*, align 8
+ %1 = bitcast %swift.opaque** %t.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %1, i8 0, i64 8, i1 false)
+ store %swift.type* %T, %swift.type** %T1, align 8
+ call void @llvm.dbg.declare(metadata %swift.type** %T1, metadata !58, metadata !DIExpression()), !dbg !60
+ store %swift.opaque* %0, %swift.opaque** %t.debug, align 8, !dbg !60
+ call void @llvm.dbg.addr(metadata %swift.opaque** %t.debug, metadata !59, metadata !DIExpression(DW_OP_deref)), !dbg !61
+ ret void, !dbg !62
+}
+
+declare swifttailcc void @"$s10async_args10forceSplityyYaF"(%swift.context* swiftasync %0) #0
+
+; Function Attrs: nounwind
+declare token @llvm.coro.id.async(i32, i32, i32, i8*) #3
+
+; Function Attrs: cold noreturn nounwind
+declare void @llvm.trap() #4
+
+; Function Attrs: nounwind
+declare i8* @llvm.coro.begin(token, i8* writeonly) #3
+
+; Function Attrs: nounwind
+define internal swifttailcc void @__swift_suspend_dispatch_1(i8* %0, %swift.context* %1) #3 !dbg !69 {
+entry:
+ %2 = bitcast i8* %0 to void (%swift.context*)*, !dbg !71
+ musttail call swifttailcc void %2(%swift.context* swiftasync %1), !dbg !71
+ ret void, !dbg !71
+}
+
+; Function Attrs: nounwind
+declare i1 @llvm.coro.end.async(i8*, i1, ...) #3
+
+define swiftcc void @"$s10async_args4use3yyxlF"(%swift.opaque* noalias nocapture %0, %swift.type* %T) #0 !dbg !72 {
+entry:
+ %T1 = alloca %swift.type*, align 8
+ %t.debug = alloca %swift.opaque*, align 8
+ %1 = bitcast %swift.opaque** %t.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %1, i8 0, i64 8, i1 false)
+ store %swift.type* %T, %swift.type** %T1, align 8
+ call void @llvm.dbg.declare(metadata %swift.type** %T1, metadata !74, metadata !DIExpression()), !dbg !76
+ store %swift.opaque* %0, %swift.opaque** %t.debug, align 8, !dbg !76
+ call void @llvm.dbg.addr(metadata %swift.opaque** %t.debug, metadata !75, metadata !DIExpression(DW_OP_deref)), !dbg !77
+ ret void, !dbg !78
+}
+
+define swifttailcc void @"$s10async_args14withGenericArgyyxnYalF"(%swift.context* swiftasync %0, %swift.opaque* noalias nocapture %1, %swift.type* %T) #0 !dbg !80 {
+entry:
+ call void @llvm.dbg.declare(metadata %swift.type* %T, metadata !82, metadata !DIExpression()), !dbg !84
+ %2 = alloca %swift.context*, align 8
+ %msg.debug = alloca %swift.opaque*, align 8
+ %3 = bitcast %swift.context* %0 to <{ %swift.context*, void (%swift.context*)*, i32 }>*
+ %4 = call token @llvm.coro.id.async(i32 20, i32 16, i32 0, i8* bitcast (%swift.async_func_pointer* @"$s10async_args14withGenericArgyyxnYalFTu" to i8*))
+ %5 = call i8* @llvm.coro.begin(token %4, i8* null)
+ store %swift.context* %0, %swift.context** %2, align 8
+ %6 = bitcast %swift.opaque** %msg.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %6, i8 0, i64 8, i1 false)
+ %7 = bitcast %swift.opaque** %msg.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %7, i8 0, i64 8, i1 false)
+ %8 = bitcast %swift.opaque** %msg.debug to i8*
+ call void @llvm.memset.p0i8.i64(i8* align 8 %8, i8 0, i64 8, i1 false)
+ %9 = bitcast %swift.type* %T to i8***, !dbg !85
+ %10 = getelementptr inbounds i8**, i8*** %9, i64 -1, !dbg !85
+ %T.valueWitnesses = load i8**, i8*** %10, align 8, !dbg !85, !invariant.load !36, !dereferenceable !88
+ %11 = bitcast i8** %T.valueWitnesses to %swift.vwtable*, !dbg !85
+ %12 = getelementptr inbounds %swift.vwtable, %swift.vwtable* %11, i32 0, i32 8, !dbg !85
+ %size = load i64, i64* %12, align 8, !dbg !85, !invariant.load !36
+ %13 = add i64 %size, 15, !dbg !85
+ %14 = and i64 %13, -16, !dbg !85
+ %15 = call swiftcc i8* @swift_task_alloc(i64 %14) #3, !dbg !85
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %15), !dbg !85
+ %16 = bitcast i8* %15 to %swift.opaque*, !dbg !85
+ store %swift.opaque* %1, %swift.opaque** %msg.debug, align 8, !dbg !84
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !89
+ call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata !83, metadata !DIExpression(DW_OP_deref)), !dbg !91
+ %17 = call i8* @llvm.coro.async.resume(), !dbg !84
+ %18 = load %swift.context*, %swift.context** %2, align 8, !dbg !84
+ %19 = load %swift.context*, %swift.context** %2, align 8, !dbg !84
+ %20 = call { i8* } (i32, i8*, i8*, ...) @llvm.coro.suspend.async.sl_p0i8s(i32 0, i8* %17, i8* bitcast (i8* (i8*)* @__swift_async_resume_get_context to i8*), i8* bitcast (void (i8*, i64, i64, %swift.context*)* @__swift_suspend_point to i8*), i8* %17, i64 0, i64 0, %swift.context* %19), !dbg !84
+ %21 = extractvalue { i8* } %20, 0, !dbg !84
+ %22 = call i8* @__swift_async_resume_get_context(i8* %21), !dbg !84
+ %23 = bitcast i8* %22 to %swift.context*, !dbg !84
+ store %swift.context* %23, %swift.context** %2, align 8, !dbg !84
+ store %swift.opaque* %1, %swift.opaque** %msg.debug, align 8, !dbg !84
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !89
+ call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata !83, metadata !DIExpression(DW_OP_deref)), !dbg !91
+ %24 = getelementptr inbounds i8*, i8** %T.valueWitnesses, i32 2, !dbg !92
+ %25 = load i8*, i8** %24, align 8, !dbg !92, !invariant.load !36
+ %initializeWithCopy = bitcast i8* %25 to %swift.opaque* (%swift.opaque*, %swift.opaque*, %swift.type*)*, !dbg !92
+ %26 = call %swift.opaque* %initializeWithCopy(%swift.opaque* noalias %16, %swift.opaque* noalias %1, %swift.type* %T) #3, !dbg !92
+ call swiftcc void @"$s10async_args4use3yyxlF"(%swift.opaque* noalias nocapture %16, %swift.type* %T), !dbg !93
+ %27 = getelementptr inbounds i8*, i8** %T.valueWitnesses, i32 1, !dbg !93
+ %28 = load i8*, i8** %27, align 8, !dbg !93, !invariant.load !36
+ %destroy = bitcast i8* %28 to void (%swift.opaque*, %swift.type*)*, !dbg !93
+ call void %destroy(%swift.opaque* noalias %16, %swift.type* %T) #3, !dbg !93
+ %29 = load i32, i32* getelementptr inbounds (%swift.async_func_pointer, %swift.async_func_pointer* @"$s10async_args10forceSplityyYaFTu", i32 0, i32 1), align 8, !dbg !94
+ %30 = zext i32 %29 to i64, !dbg !94
+ %31 = call swiftcc i8* @swift_task_alloc(i64 %30) #3, !dbg !94
+ call void @llvm.lifetime.start.p0i8(i64 -1, i8* %31), !dbg !94
+ %32 = bitcast i8* %31 to <{ %swift.context*, void (%swift.context*)*, i32 }>*, !dbg !94
+ %33 = load %swift.context*, %swift.context** %2, align 8, !dbg !94
+ %34 = getelementptr inbounds <{ %swift.context*, void (%swift.context*)*, i32 }>, <{ %swift.context*, void (%swift.context*)*, i32 }>* %32, i32 0, i32 0, !dbg !94
+ store %swift.context* %33, %swift.context** %34, align 8, !dbg !94
+ %35 = call i8* @llvm.coro.async.resume(), !dbg !94
+ %36 = bitcast i8* %35 to void (%swift.context*)*, !dbg !94
+ %37 = getelementptr inbounds <{ %swift.context*, void (%swift.context*)*, i32 }>, <{ %swift.context*, void (%swift.context*)*, i32 }>* %32, i32 0, i32 1, !dbg !94
+ store void (%swift.context*)* %36, void (%swift.context*)** %37, align 8, !dbg !94
+ %38 = bitcast i8* %31 to %swift.context*, !dbg !94
+ %39 = call { i8* } (i32, i8*, i8*, ...) @llvm.coro.suspend.async.sl_p0i8s(i32 0, i8* %35, i8* bitcast (i8* (i8*)* @__swift_async_resume_project_context to i8*), i8* bitcast (void (i8*, %swift.context*)* @__swift_suspend_dispatch_1.1 to i8*), i8* bitcast (void (%swift.context*)* @"$s10async_args10forceSplityyYaF" to i8*), %swift.context* %38), !dbg !94
+ %40 = extractvalue { i8* } %39, 0, !dbg !94
+ %41 = call i8* @__swift_async_resume_project_context(i8* %40), !dbg !94
+ %42 = bitcast i8* %41 to %swift.context*, !dbg !94
+ store %swift.context* %42, %swift.context** %2, align 8, !dbg !94
+ call swiftcc void @swift_task_dealloc(i8* %31) #3, !dbg !94
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %31), !dbg !94
+ %43 = call i8* @llvm.coro.async.resume(), !dbg !94
+ %44 = load %swift.context*, %swift.context** %2, align 8, !dbg !94
+ %45 = call { i8* } (i32, i8*, i8*, ...) @llvm.coro.suspend.async.sl_p0i8s(i32 0, i8* %43, i8* bitcast (i8* (i8*)* @__swift_async_resume_get_context to i8*), i8* bitcast (void (i8*, i64, i64, %swift.context*)* @__swift_suspend_point to i8*), i8* %43, i64 0, i64 0, %swift.context* %44), !dbg !94
+ %46 = extractvalue { i8* } %45, 0, !dbg !94
+ %47 = call i8* @__swift_async_resume_get_context(i8* %46), !dbg !94
+ %48 = bitcast i8* %47 to %swift.context*, !dbg !94
+ store %swift.context* %48, %swift.context** %2, align 8, !dbg !94
+ store %swift.opaque* %1, %swift.opaque** %msg.debug, align 8, !dbg !84
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !89
+ call void @llvm.dbg.addr(metadata %swift.opaque** %msg.debug, metadata !83, metadata !DIExpression(DW_OP_deref)), !dbg !91
+ %49 = call swiftcc i1 @"$s10async_args7booleanSbvg"(), !dbg !95
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !95
+ br i1 %49, label %50, label %52, !dbg !95
+
+50: ; preds = %entry
+ %51 = call %swift.opaque* %initializeWithCopy(%swift.opaque* noalias %16, %swift.opaque* noalias %1, %swift.type* %T) #3, !dbg !97
+ call swiftcc void @"$s10async_args3useyyxlF"(%swift.opaque* noalias nocapture %16, %swift.type* %T), !dbg !99
+ call void %destroy(%swift.opaque* noalias %16, %swift.type* %T) #3, !dbg !100
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !100
+ call void @llvm.dbg.value(metadata %swift.opaque** undef, metadata !83, metadata !DIExpression()), !dbg !91
+ br label %54, !dbg !100
+
+52: ; preds = %entry
+ %53 = call %swift.opaque* %initializeWithCopy(%swift.opaque* noalias %16, %swift.opaque* noalias %1, %swift.type* %T) #3, !dbg !101
+ call swiftcc void @"$s10async_args4use2yyxlF"(%swift.opaque* noalias nocapture %16, %swift.type* %T), !dbg !103
+ call void %destroy(%swift.opaque* noalias %16, %swift.type* %T) #3, !dbg !104
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !104
+ br label %54, !dbg !104
+
+54: ; preds = %50, %52
+ call void %destroy(%swift.opaque* noalias %1, %swift.type* %T) #3, !dbg !105
+ %55 = bitcast %swift.opaque* %16 to i8*, !dbg !105
+ call void @llvm.lifetime.end.p0i8(i64 -1, i8* %55), !dbg !105
+ call swiftcc void @swift_task_dealloc(i8* %15) #3, !dbg !105
+ call void asm sideeffect "", "r"(%swift.opaque** %msg.debug), !dbg !105
+ %56 = load %swift.context*, %swift.context** %2, align 8, !dbg !105
+ %57 = bitcast %swift.context* %56 to <{ %swift.context*, void (%swift.context*)*, i32 }>*, !dbg !105
+ %58 = getelementptr inbounds <{ %swift.context*, void (%swift.context*)*, i32 }>, <{ %swift.context*, void (%swift.context*)*, i32 }>* %57, i32 0, i32 1, !dbg !105
+ %59 = load void (%swift.context*)*, void (%swift.context*)** %58, align 8, !dbg !105
+ %60 = load %swift.context*, %swift.context** %2, align 8, !dbg !105
+ %61 = bitcast void (%swift.context*)* %59 to i8*, !dbg !105
+ %62 = call i1 (i8*, i1, ...) @llvm.coro.end.async(i8* %5, i1 false, void (i8*, %swift.context*)* @__swift_suspend_dispatch_1.2, i8* %61, %swift.context* %60), !dbg !105
+ unreachable, !dbg !105
+}
+
+; Function Attrs: argmemonly nounwind
+declare extern_weak swiftcc i8* @swift_task_alloc(i64) #5
+
+; Function Attrs: argmemonly nofree nosync nounwind willreturn
+declare void @llvm.lifetime.start.p0i8(i64 immarg, i8* nocapture) #6
+
+; Function Attrs: nounwind
+declare i8* @llvm.coro.async.resume() #3
+
+; Function Attrs: nounwind
+define linkonce_odr hidden i8* @__swift_async_resume_get_context(i8* %0) #7 !dbg !106 {
+entry:
+ ret i8* %0, !dbg !107
+}
+
+; Function Attrs: nounwind
+define internal swifttailcc void @__swift_suspend_point(i8* %0, i64 %1, i64 %2, %swift.context* %3) #3 !dbg !108 {
+entry:
+ musttail call swifttailcc void @swift_task_switch(%swift.context* swiftasync %3, i8* %0, i64 %1, i64 %2) #3, !dbg !109
+ ret void, !dbg !109
+}
+
+; Function Attrs: nounwind
+declare extern_weak swifttailcc void @swift_task_switch(%swift.context*, i8*, i64, i64) #3
+
+; Function Attrs: nounwind
+declare { i8* } @llvm.coro.suspend.async.sl_p0i8s(i32, i8*, i8*, ...) #3
+
+; Function Attrs: alwaysinline nounwind
+define linkonce_odr hidden i8* @__swift_async_resume_project_context(i8* %0) #8 !dbg !110 {
+entry:
+ %1 = bitcast i8* %0 to i8**, !dbg !111
+ %2 = load i8*, i8** %1, align 8, !dbg !111
+ %3 = call i8** @llvm.swift.async.context.addr(), !dbg !111
+ store i8* %2, i8** %3, align 8, !dbg !111
+ ret i8* %2, !dbg !111
+}
+
+; Function Attrs: nounwind readnone
+declare i8** @llvm.swift.async.context.addr() #9
+
+; Function Attrs: nounwind
+define internal swifttailcc void @__swift_suspend_dispatch_1.1(i8* %0, %swift.context* %1) #3 !dbg !112 {
+entry:
+ %2 = bitcast i8* %0 to void (%swift.context*)*, !dbg !113
+ musttail call swifttailcc void %2(%swift.context* swiftasync %1), !dbg !113
+ ret void, !dbg !113
+}
+
+; Function Attrs: argmemonly nounwind
+declare extern_weak swiftcc void @swift_task_dealloc(i8*) #5
+
+; Function Attrs: argmemonly nofree nosync nounwind willreturn
+declare void @llvm.lifetime.end.p0i8(i64 immarg, i8* nocapture) #6
+
+; Function Attrs: nounwind
+define internal swifttailcc void @__swift_suspend_dispatch_1.2(i8* %0, %swift.context* %1) #3 !dbg !114 {
+entry:
+ %2 = bitcast i8* %0 to void (%swift.context*)*, !dbg !115
+ musttail call swifttailcc void %2(%swift.context* swiftasync %1), !dbg !115
+ ret void, !dbg !115
+}
+
+declare extern_weak void @"_swift_FORCE_LOAD_$_swiftCompatibilityConcurrency"()
+
+attributes #0 = { "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" }
+attributes #1 = { nofree nosync nounwind readnone speculatable willreturn }
+attributes #2 = { argmemonly nofree nounwind willreturn writeonly }
+attributes #3 = { nounwind }
+attributes #4 = { cold noreturn nounwind }
+attributes #5 = { argmemonly nounwind }
+attributes #6 = { argmemonly nofree nosync nounwind willreturn }
+attributes #7 = { nounwind "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" }
+attributes #8 = { alwaysinline nounwind "frame-pointer"="all" "no-trapping-math"="true" "stack-protector-buffer-size"="8" "target-cpu"="penryn" "target-features"="+cx16,+cx8,+fxsr,+mmx,+sahf,+sse,+sse2,+sse3,+sse4.1,+ssse3,+x87" "tune-cpu"="generic" }
+attributes #9 = { nounwind readnone }
+
+!llvm.dbg.cu = !{!0, !11}
+!swift.module.flags = !{!13}
+!llvm.module.flags = !{!14, !15, !16, !17, !18, !19, !20, !21, !22, !23, !24, !25}
+!llvm.linker.options = !{!26, !27, !28, !29, !30}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_Swift, file: !1, producer: "Swift version 5.7-dev (LLVM 8abcd8862898818, Swift 59a3bd190248a0e)", isOptimized: false, runtimeVersion: 5, emissionKind: FullDebug, imports: !2)
+!1 = !DIFile(filename: "async_args.swift", directory: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64/tmp/swift")
+!2 = !{!3, !5, !7, !9}
+!3 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !1, entity: !4, file: !1)
+!4 = !DIModule(scope: null, name: "async_args")
+!5 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !1, entity: !6, file: !1)
+!6 = !DIModule(scope: null, name: "Swift", includePath: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64/lib/swift/macosx/Swift.swiftmodule/x86_64-apple-macos.swiftmodule")
+!7 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !1, entity: !8, file: !1)
+!8 = !DIModule(scope: null, name: "_Concurrency", includePath: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64/lib/swift/macosx/_Concurrency.swiftmodule/x86_64-apple-macos.swiftmodule")
+!9 = !DIImportedEntity(tag: DW_TAG_imported_module, scope: !1, entity: !10, file: !1)
+!10 = !DIModule(scope: null, name: "SwiftOnoneSupport", includePath: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64/lib/swift/macosx/SwiftOnoneSupport.swiftmodule/x86_64-apple-macos.swiftmodule")
+!11 = distinct !DICompileUnit(language: DW_LANG_ObjC, file: !12, producer: "clang version 13.0.0 (git@github.com:apple/llvm-project.git 8abcd8862898818152e04399a042997bc185a0e9)", isOptimized: false, runtimeVersion: 2, emissionKind: FullDebug, splitDebugInlining: false, nameTableKind: None, sysroot: "/")
+!12 = !DIFile(filename: "<swift-imported-modules>", directory: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64/tmp/swift")
+!13 = !{!"standard-library", i1 false}
+!14 = !{i32 1, !"Objective-C Version", i32 2}
+!15 = !{i32 1, !"Objective-C Image Info Version", i32 0}
+!16 = !{i32 1, !"Objective-C Image Info Section", !"__DATA,__objc_imageinfo,regular,no_dead_strip"}
+!17 = !{i32 4, !"Objective-C Garbage Collection", i32 84346624}
+!18 = !{i32 1, !"Objective-C Class Properties", i32 64}
+!19 = !{i32 7, !"Dwarf Version", i32 4}
+!20 = !{i32 2, !"Debug Info Version", i32 3}
+!21 = !{i32 1, !"wchar_size", i32 4}
+!22 = !{i32 7, !"PIC Level", i32 2}
+!23 = !{i32 7, !"uwtable", i32 1}
+!24 = !{i32 7, !"frame-pointer", i32 2}
+!25 = !{i32 1, !"Swift Version", i32 7}
+!26 = !{!"-lswiftSwiftOnoneSupport"}
+!27 = !{!"-lswiftCore"}
+!28 = !{!"-lswift_Concurrency"}
+!29 = !{!"-lobjc"}
+!30 = !{!"-lswiftCompatibilityConcurrency"}
+!31 = distinct !DISubprogram(name: "boolean.get", linkageName: "$s10async_args7booleanSbvg", scope: !4, file: !1, line: 6, type: !32, scopeLine: 6, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!32 = !DISubroutineType(types: !33)
+!33 = !{!34}
+!34 = !DICompositeType(tag: DW_TAG_structure_type, name: "Bool", scope: !6, file: !35, size: 8, elements: !36, runtimeLang: DW_LANG_Swift, identifier: "$sSbD")
+!35 = !DIFile(filename: "lib/swift/macosx/Swift.swiftmodule/x86_64-apple-macos.swiftmodule", directory: "/Volumes/Data/work/solon/build/Ninja+cmark-DebugAssert+llvm-RelWithDebInfoAssert+swift-DebugAssert+stdlib-DebugAssert/swift-macosx-x86_64")
+!36 = !{}
+!37 = !DILocation(line: 6, column: 27, scope: !38)
+!38 = distinct !DILexicalBlock(scope: !31, file: !1, line: 6, column: 19)
+!39 = distinct !DISubprogram(name: "use", linkageName: "$s10async_args3useyyxlF", scope: !4, file: !1, line: 8, type: !40, scopeLine: 8, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !44)
+!40 = !DISubroutineType(types: !41)
+!41 = !{!42, !43}
+!42 = !DICompositeType(tag: DW_TAG_structure_type, name: "$sytD", file: !1, elements: !36, runtimeLang: DW_LANG_Swift, identifier: "$sytD")
+!43 = !DICompositeType(tag: DW_TAG_structure_type, name: "$sxD", file: !1, runtimeLang: DW_LANG_Swift, identifier: "$sxD")
+!44 = !{!45, !50}
+!45 = !DILocalVariable(name: "$\CF\84_0_0", scope: !39, file: !1, type: !46, flags: DIFlagArtificial)
+!46 = !DIDerivedType(tag: DW_TAG_typedef, name: "T", scope: !48, file: !47, baseType: !49)
+!47 = !DIFile(filename: "<compiler-generated>", directory: "")
+!48 = !DIModule(scope: null, name: "Builtin")
+!49 = !DIDerivedType(tag: DW_TAG_pointer_type, name: "$sBpD", baseType: null, size: 64)
+!50 = !DILocalVariable(name: "t", arg: 1, scope: !39, file: !1, line: 8, type: !51)
+!51 = !DIDerivedType(tag: DW_TAG_const_type, baseType: !43)
+!52 = !DILocation(line: 0, scope: !39)
+!53 = !DILocation(line: 8, column: 20, scope: !39)
+!54 = !DILocation(line: 8, column: 29, scope: !55)
+!55 = distinct !DILexicalBlock(scope: !39, file: !1, line: 8, column: 28)
+!56 = distinct !DISubprogram(name: "use2", linkageName: "$s10async_args4use2yyxlF", scope: !4, file: !1, line: 9, type: !40, scopeLine: 9, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !57)
+!57 = !{!58, !59}
+!58 = !DILocalVariable(name: "$\CF\84_0_0", scope: !56, file: !1, type: !46, flags: DIFlagArtificial)
+!59 = !DILocalVariable(name: "t", arg: 1, scope: !56, file: !1, line: 9, type: !51)
+!60 = !DILocation(line: 0, scope: !56)
+!61 = !DILocation(line: 9, column: 21, scope: !56)
+!62 = !DILocation(line: 9, column: 30, scope: !63)
+!63 = distinct !DILexicalBlock(scope: !56, file: !1, line: 9, column: 29)
+!64 = distinct !DISubprogram(name: "forceSplit", linkageName: "$s10async_args10forceSplityyYaF", scope: !4, file: !1, line: 10, type: !65, scopeLine: 10, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!65 = !DISubroutineType(types: !66)
+!66 = !{!42}
+!67 = !DILocation(line: 11, column: 1, scope: !68)
+!68 = distinct !DILexicalBlock(scope: !64, file: !1, line: 10, column: 32)
+!69 = distinct !DISubprogram(linkageName: "__swift_suspend_dispatch_1", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!70 = !DISubroutineType(types: null)
+!71 = !DILocation(line: 0, scope: !69)
+!72 = distinct !DISubprogram(name: "use3", linkageName: "$s10async_args4use3yyxlF", scope: !4, file: !1, line: 12, type: !40, scopeLine: 12, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !73)
+!73 = !{!74, !75}
+!74 = !DILocalVariable(name: "$\CF\84_0_0", scope: !72, file: !1, type: !46, flags: DIFlagArtificial)
+!75 = !DILocalVariable(name: "t", arg: 1, scope: !72, file: !1, line: 12, type: !51)
+!76 = !DILocation(line: 0, scope: !72)
+!77 = !DILocation(line: 12, column: 21, scope: !72)
+!78 = !DILocation(line: 12, column: 30, scope: !79)
+!79 = distinct !DILexicalBlock(scope: !72, file: !1, line: 12, column: 29)
+!80 = distinct !DISubprogram(name: "withGenericArg", linkageName: "$s10async_args14withGenericArgyyxnYalF", scope: !4, file: !1, line: 14, type: !40, scopeLine: 14, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !81)
+!81 = !{!82, !83, !83, !83}
+!82 = !DILocalVariable(name: "$\CF\84_0_0", scope: !80, file: !1, type: !46, flags: DIFlagArtificial)
+!83 = !DILocalVariable(name: "msg", arg: 1, scope: !80, file: !1, line: 14, type: !51)
+!84 = !DILocation(line: 0, scope: !80)
+!85 = !DILocation(line: 0, scope: !86)
+!86 = !DILexicalBlockFile(scope: !87, file: !47, discriminator: 0)
+!87 = distinct !DILexicalBlock(scope: !80, file: !1, line: 14, column: 55)
+!88 = !{i64 96}
+!89 = !DILocation(line: 0, scope: !90)
+!90 = !DILexicalBlockFile(scope: !80, file: !47, discriminator: 0)
+!91 = !DILocation(line: 14, column: 31, scope: !80)
+!92 = !DILocation(line: 15, column: 10, scope: !87)
+!93 = !DILocation(line: 15, column: 5, scope: !87)
+!94 = !DILocation(line: 24, column: 9, scope: !87)
+!95 = !DILocation(line: 35, column: 6, scope: !96)
+!96 = distinct !DILexicalBlock(scope: !87, file: !1, line: 35, column: 3)
+!97 = !DILocation(line: 36, column: 11, scope: !98)
+!98 = distinct !DILexicalBlock(scope: !96, file: !1, line: 35, column: 14)
+!99 = !DILocation(line: 36, column: 7, scope: !98)
+!100 = !DILocation(line: 37, column: 3, scope: !96)
+!101 = !DILocation(line: 38, column: 12, scope: !102)
+!102 = distinct !DILexicalBlock(scope: !87, file: !1, line: 37, column: 10)
+!103 = !DILocation(line: 38, column: 7, scope: !102)
+!104 = !DILocation(line: 39, column: 3, scope: !87)
+!105 = !DILocation(line: 40, column: 1, scope: !87)
+!106 = distinct !DISubprogram(linkageName: "__swift_async_resume_get_context", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!107 = !DILocation(line: 0, scope: !106)
+!108 = distinct !DISubprogram(linkageName: "__swift_suspend_point", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!109 = !DILocation(line: 0, scope: !108)
+!110 = distinct !DISubprogram(linkageName: "__swift_async_resume_project_context", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!111 = !DILocation(line: 0, scope: !110)
+!112 = distinct !DISubprogram(linkageName: "__swift_suspend_dispatch_1.1", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!113 = !DILocation(line: 0, scope: !112)
+!114 = distinct !DISubprogram(linkageName: "__swift_suspend_dispatch_1.2", scope: !4, file: !47, type: !70, flags: DIFlagArtificial, spFlags: DISPFlagLocalToUnit | DISPFlagDefinition, unit: !0, retainedNodes: !36)
+!115 = !DILocation(line: 0, scope: !114)
diff --git a/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr.ll b/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr.ll
new file mode 100644
index 000000000000..19dbb1d6c8ea
--- /dev/null
+++ b/llvm/test/Transforms/Coroutines/coro-debug-dbg.addr.ll
@@ -0,0 +1,257 @@
+; Tests whether we properly set up llvm.dbg.addr.
+;
+; Since we do not have any guarantees around the usage of llvm.dbg.addr, we
+; cannot propagate it into funclets the way we do llvm.dbg.declare. But if
+; users create the debug_value for us, make sure that we propagate
+; llvm.dbg.addr into the beginning of the coroutine and all other funclets.
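+;
+; For illustration only (not FileCheck'd; the "!..." metadata numbers below are
+; placeholders): in the pre-split function the variable "x" is announced through
+; llvm.dbg.addr rather than llvm.dbg.declare, e.g.
+;   call void @llvm.dbg.addr(metadata [10 x i32]* %x, metadata !12, metadata !DIExpression()), !dbg !17
+; and after coro-split each funclet is expected to describe it through the
+; coroutine frame pointer, as the CHECK lines below verify:
+;   call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata !..., metadata !DIExpression(...))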
+
+; RUN: opt < %s -passes='function(coro-early),cgscc(coro-split,coro-split)' -S | FileCheck %s
+
+; This file is based on coro-debug-frame-variable.ll.
+; CHECK: define internal fastcc void @f.resume(%f.Frame* noalias nonnull align 16 dereferenceable(80) %FramePtr) !dbg ![[RESUME_FN_DBG_NUM:[0-9]+]]
+; CHECK-NEXT: entry.resume:
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[XVAR_RESUME:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[YVAR_RESUME:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[ZVAR_RESUME:[0-9]+]],
+
+; CHECK: define internal fastcc void @f.destroy(%f.Frame* noalias nonnull align 16 dereferenceable(80) %FramePtr) !dbg ![[DESTROY_FN_DBG_NUM:[0-9]+]] {
+; CHECK-NEXT: entry.destroy:
+; CHECK-NEXT: %FramePtr.debug = alloca
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[XVAR_DESTROY:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[YVAR_DESTROY:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[ZVAR_DESTROY:[0-9]+]],
+
+; CHECK: define internal fastcc void @f.cleanup(%f.Frame* noalias nonnull align 16 dereferenceable(80) %FramePtr) !dbg ![[CLEANUP_FN_DBG_NUM:[0-9]+]] {
+; CHECK: entry.cleanup:
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[XVAR_CLEANUP:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[YVAR_CLEANUP:[0-9]+]],
+; CHECK: call void @llvm.dbg.addr(metadata %f.Frame** %FramePtr.debug, metadata ![[ZVAR_CLEANUP:[0-9]+]],
+
+; CHECK-DAG: ![[RESUME_FN_DBG_NUM]] = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov"
+; CHECK-DAG: ![[DESTROY_FN_DBG_NUM]] = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov"
+; CHECK-DAG: ![[CLEANUP_FN_DBG_NUM]] = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov"
+; CHECK-DAG: ![[XVAR_RESUME]] = !DILocalVariable(name: "x"
+; CHECK-DAG: ![[YVAR_RESUME]] = !DILocalVariable(name: "y"
+; CHECK-DAG: ![[ZVAR_RESUME]] = !DILocalVariable(name: "z"
+; CHECK-DAG: ![[XVAR_DESTROY]] = !DILocalVariable(name: "x"
+; CHECK-DAG: ![[YVAR_DESTROY]] = !DILocalVariable(name: "y"
+; CHECK-DAG: ![[ZVAR_DESTROY]] = !DILocalVariable(name: "z"
+; CHECK-DAG: ![[XVAR_CLEANUP]] = !DILocalVariable(name: "x"
+; CHECK-DAG: ![[YVAR_CLEANUP]] = !DILocalVariable(name: "y"
+; CHECK-DAG: ![[ZVAR_CLEANUP]] = !DILocalVariable(name: "z"
+
+source_filename = "../llvm/test/Transforms/Coroutines/coro-debug-dbg.values-O2.ll"
+declare void @consume(i32)
+
+define void @f(i32 %i, i32 %j, i8* %ptr) "coroutine.presplit"="0" !dbg !8 {
+entry:
+ %__promise = alloca i8, align 8
+ %x = alloca [10 x i32], align 16
+ %produced = call i32 @value_producer()
+ %id = call token @llvm.coro.id(i32 16, i8* %__promise, i8* null, i8* null)
+ %alloc = call i1 @llvm.coro.alloc(token %id)
+ br i1 %alloc, label %coro.alloc, label %coro.init
+
+coro.alloc: ; preds = %entry
+ %size = call i64 @llvm.coro.size.i64()
+ %memory = call i8* @new(i64 %size)
+ br label %coro.init
+
+coro.init: ; preds = %coro.alloc, %entry
+ %phi.entry.alloc = phi i8* [ null, %entry ], [ %memory, %coro.alloc ]
+ %begin = call i8* @llvm.coro.begin(token %id, i8* %phi.entry.alloc)
+ %ready = call i1 @await_ready()
+ br i1 %ready, label %init.ready, label %init.suspend
+
+init.suspend: ; preds = %coro.init
+ %save = call token @llvm.coro.save(i8* null)
+ call void @await_suspend()
+ %suspend = call i8 @llvm.coro.suspend(token %save, i1 false)
+ switch i8 %suspend, label %coro.ret [
+ i8 0, label %init.ready
+ i8 1, label %init.cleanup
+ ]
+
+init.cleanup: ; preds = %init.suspend
+ br label %cleanup
+
+init.ready: ; preds = %init.suspend, %coro.init
+ call void @await_resume()
+ %i.init.ready.inc = add nsw i32 0, 1
+ call void @llvm.dbg.addr(metadata [10 x i32]* %x, metadata !12, metadata !DIExpression()), !dbg !17
+ %memset = bitcast [10 x i32]* %x to i8*, !dbg !17
+ call void @llvm.memset.p0i8.i64(i8* align 16 %memset, i8 0, i64 40, i1 false), !dbg !17
+ call void @print(i32 %i.init.ready.inc)
+ call void @llvm.dbg.addr(metadata i8* %ptr, metadata !24, metadata !DIExpression()), !dbg !17
+ %ready.again = call zeroext i1 @await_ready()
+ br i1 %ready.again, label %await.ready, label %await.suspend
+
+await.suspend: ; preds = %init.ready
+ %save.again = call token @llvm.coro.save(i8* null)
+ %from.address = call i8* @from_address(i8* %begin)
+ call void @await_suspend()
+ %suspend.again = call i8 @llvm.coro.suspend(token %save.again, i1 false)
+ switch i8 %suspend.again, label %coro.ret [
+ i8 0, label %await.ready
+ i8 1, label %await.cleanup
+ ]
+
+await.cleanup: ; preds = %await.suspend
+ br label %cleanup
+
+await.ready: ; preds = %await.suspend, %init.ready
+ call void @await_resume()
+ %arrayidx0 = getelementptr inbounds [10 x i32], [10 x i32]* %x, i64 0, i64 0, !dbg !19
+ store i32 1, i32* %arrayidx0, align 16, !dbg !20
+ call void @llvm.dbg.addr(metadata i32* %arrayidx0, metadata !18, metadata !DIExpression()), !dbg !11
+ %arrayidx1 = getelementptr inbounds [10 x i32], [10 x i32]* %x, i64 0, i64 1, !dbg !21
+ store i32 2, i32* %arrayidx1, align 4, !dbg !22
+ %i.await.ready.inc = add nsw i32 %i.init.ready.inc, 1
+ call void @consume(i32 %produced)
+ call void @consume(i32 %i)
+ call void @consume(i32 %j)
+ call void @llvm.dbg.addr(metadata [10 x i32]* %x, metadata !23, metadata !DIExpression()), !dbg !17
+ call void @print(i32 %i.await.ready.inc)
+ call void @return_void()
+ br label %coro.final
+
+coro.final: ; preds = %await.ready
+ call void @final_suspend()
+ %coro.final.await_ready = call i1 @await_ready()
+ br i1 %coro.final.await_ready, label %final.ready, label %final.suspend
+
+final.suspend: ; preds = %coro.final
+ %final.suspend.coro.save = call token @llvm.coro.save(i8* null)
+ %final.suspend.from_address = call i8* @from_address(i8* %begin)
+ call void @await_suspend()
+ %final.suspend.coro.suspend = call i8 @llvm.coro.suspend(token %final.suspend.coro.save, i1 true)
+ switch i8 %final.suspend.coro.suspend, label %coro.ret [
+ i8 0, label %final.ready
+ i8 1, label %final.cleanup
+ ]
+
+final.cleanup: ; preds = %final.suspend
+ br label %cleanup
+
+final.ready: ; preds = %final.suspend, %coro.final
+ call void @await_resume()
+ br label %cleanup
+
+cleanup: ; preds = %final.ready, %final.cleanup, %await.cleanup, %init.cleanup
+ %cleanup.dest.slot.0 = phi i32 [ 0, %final.ready ], [ 2, %final.cleanup ], [ 2, %await.cleanup ], [ 2, %init.cleanup ]
+ %free.memory = call i8* @llvm.coro.free(token %id, i8* %begin)
+ %free = icmp ne i8* %free.memory, null
+ br i1 %free, label %coro.free, label %after.coro.free
+
+coro.free: ; preds = %cleanup
+ call void @delete(i8* %free.memory)
+ br label %after.coro.free
+
+after.coro.free: ; preds = %coro.free, %cleanup
+ switch i32 %cleanup.dest.slot.0, label %unreachable [
+ i32 0, label %cleanup.cont
+ i32 2, label %coro.ret
+ ]
+
+cleanup.cont: ; preds = %after.coro.free
+ br label %coro.ret
+
+coro.ret: ; preds = %cleanup.cont, %after.coro.free, %final.suspend, %await.suspend, %init.suspend
+ %end = call i1 @llvm.coro.end(i8* null, i1 false)
+ ret void
+
+unreachable: ; preds = %after.coro.free
+ unreachable
+}
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare void @llvm.dbg.declare(metadata, metadata, metadata) #0
+
+; Function Attrs: argmemonly nounwind readonly
+declare token @llvm.coro.id(i32, i8* readnone, i8* nocapture readonly, i8*) #1
+
+; Function Attrs: nounwind
+declare i1 @llvm.coro.alloc(token) #2
+
+; Function Attrs: nounwind readnone
+declare i64 @llvm.coro.size.i64() #3
+
+; Function Attrs: nounwind
+declare token @llvm.coro.save(i8*) #2
+
+; Function Attrs: nounwind
+declare i8* @llvm.coro.begin(token, i8* writeonly) #2
+
+; Function Attrs: nounwind
+declare i8 @llvm.coro.suspend(token, i1) #2
+
+; Function Attrs: argmemonly nounwind readonly
+declare i8* @llvm.coro.free(token, i8* nocapture readonly) #1
+
+; Function Attrs: nounwind
+declare i1 @llvm.coro.end(i8*, i1) #2
+
+declare i8* @new(i64)
+
+declare void @delete(i8*)
+
+declare i1 @await_ready()
+
+declare void @await_suspend()
+
+declare void @await_resume()
+
+declare void @print(i32)
+
+declare i8* @from_address(i8*)
+
+declare void @return_void()
+
+declare void @final_suspend()
+
+declare i32 @value_producer()
+
+; Function Attrs: argmemonly nofree nosync nounwind willreturn writeonly
+declare void @llvm.memset.p0i8.i64(i8* nocapture writeonly, i8, i64, i1 immarg) #4
+
+; Function Attrs: nofree nosync nounwind readnone speculatable willreturn
+declare void @llvm.dbg.value(metadata, metadata, metadata) #0
+
+declare void @llvm.dbg.addr(metadata, metadata, metadata) #0
+
+attributes #0 = { nofree nosync nounwind readnone speculatable willreturn }
+attributes #1 = { argmemonly nounwind readonly }
+attributes #2 = { nounwind }
+attributes #3 = { nounwind readnone }
+attributes #4 = { argmemonly nofree nosync nounwind willreturn writeonly }
+
+!llvm.dbg.cu = !{!0}
+!llvm.linker.options = !{}
+!llvm.module.flags = !{!3, !4}
+!llvm.ident = !{!5}
+
+!0 = distinct !DICompileUnit(language: DW_LANG_C_plus_plus_14, file: !1, producer: "clang version 11.0.0", isOptimized: false, runtimeVersion: 0, emissionKind: FullDebug, enums: !2, retainedTypes: !2, splitDebugInlining: false, nameTableKind: None)
+!1 = !DIFile(filename: "repro.cpp", directory: ".")
+!2 = !{}
+!3 = !{i32 7, !"Dwarf Version", i32 4}
+!4 = !{i32 2, !"Debug Info Version", i32 3}
+!5 = !{!"clang version 11.0.0"}
+!6 = !DILocalVariable(name: "i", scope: !7, file: !1, line: 24, type: !10)
+!7 = distinct !DILexicalBlock(scope: !8, file: !1, line: 23, column: 12)
+!8 = distinct !DISubprogram(name: "foo", linkageName: "_Z3foov", scope: !1, file: !1, line: 23, type: !9, scopeLine: 23, flags: DIFlagPrototyped, spFlags: DISPFlagDefinition, unit: !0, retainedNodes: !2)
+!9 = !DISubroutineType(types: !2)
+!10 = !DIBasicType(name: "int", size: 32, encoding: DW_ATE_signed)
+!11 = !DILocation(line: 0, scope: !7)
+!12 = !DILocalVariable(name: "x", scope: !13, file: !1, line: 34, type: !14)
+!13 = distinct !DILexicalBlock(scope: !8, file: !1, line: 23, column: 12)
+!14 = !DICompositeType(tag: DW_TAG_array_type, baseType: !10, size: 320, elements: !15)
+!15 = !{!16}
+!16 = !DISubrange(count: 10)
+!17 = !DILocation(line: 24, column: 7, scope: !7)
+!18 = !DILocalVariable(name: "y", scope: !7, file: !1, line: 32, type: !10)
+!19 = !DILocation(line: 42, column: 3, scope: !7)
+!20 = !DILocation(line: 42, column: 8, scope: !7)
+!21 = !DILocation(line: 43, column: 3, scope: !7)
+!22 = !DILocation(line: 43, column: 8, scope: !7)
+!23 = !DILocalVariable(name: "z", scope: !7, file: !1, line: 24, type: !10)
+!24 = !DILocalVariable(name: "ptr", scope: !7, file: !1, line: 34, type: !10)
diff --git a/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
new file mode 100644
index 000000000000..a097219b6c93
--- /dev/null
+++ b/llvm/test/Transforms/InferAddressSpaces/AMDGPU/insert-pos-assert.ll
@@ -0,0 +1,33 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt -S -mtriple=amdgcn-amd-amdhsa -infer-address-spaces %s | FileCheck %s
+
+; Addrspacecasts must be inserted after the instructions that define the values they use.
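+;
+; Concretely (illustrative, mirroring the CHECK lines below): the flat pointer
+; loaded as %load0 is rewritten through a pair of casts such as
+;   %1 = addrspacecast i32* %load0 to i32 addrspace(1)*
+;   %2 = addrspacecast i32 addrspace(1)* %1 to i32*
+; and both casts are emitted immediately after the load that defines %load0,
+; not at the top of the block; that placement is the property this test guards.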
+
+%struct.s0 = type { i32*, i32 }
+%struct.s1 = type { %struct.s0 }
+
+@global0 = protected addrspace(4) externally_initialized global %struct.s1 zeroinitializer
+
+declare i32 @func(i32* %arg)
+
+define i32 @insert_pos_assert() {
+; CHECK-LABEL: @insert_pos_assert(
+; CHECK-NEXT: [[ALLOCA:%.*]] = alloca i32, align 4, addrspace(5)
+; CHECK-NEXT: [[LOAD0:%.*]] = load i32*, i32* addrspace(4)* getelementptr inbounds ([[STRUCT_S1:%.*]], [[STRUCT_S1]] addrspace(4)* @global0, i32 0, i32 0, i32 0), align 8
+; CHECK-NEXT: [[TMP1:%.*]] = addrspacecast i32* [[LOAD0]] to i32 addrspace(1)*
+; CHECK-NEXT: [[TMP2:%.*]] = addrspacecast i32 addrspace(1)* [[TMP1]] to i32*
+; CHECK-NEXT: [[LOAD1:%.*]] = load i32, i32 addrspace(5)* [[ALLOCA]], align 4
+; CHECK-NEXT: [[SEXT:%.*]] = sext i32 [[LOAD1]] to i64
+; CHECK-NEXT: [[GEP:%.*]] = getelementptr inbounds i32, i32* [[TMP2]], i64 [[SEXT]]
+; CHECK-NEXT: [[CALL:%.*]] = call i32 @func(i32* [[GEP]])
+; CHECK-NEXT: ret i32 [[CALL]]
+;
+ %alloca = alloca i32, align 4, addrspace(5)
+ %cast = addrspacecast i32 addrspace(5)* %alloca to i32*
+ %load0 = load i32*, i32* addrspace(4)* getelementptr inbounds (%struct.s1, %struct.s1 addrspace(4)* @global0, i32 0, i32 0, i32 0)
+ %load1 = load i32, i32* %cast
+ %sext = sext i32 %load1 to i64
+ %gep = getelementptr inbounds i32, i32* %load0, i64 %sext
+ %call = call i32 @func(i32* %gep)
+ ret i32 %call
+}
diff --git a/llvm/test/Transforms/Inline/always-inline.ll b/llvm/test/Transforms/Inline/always-inline.ll
index f947bdbd8734..37e890532375 100644
--- a/llvm/test/Transforms/Inline/always-inline.ll
+++ b/llvm/test/Transforms/Inline/always-inline.ll
@@ -314,3 +314,57 @@ define void @outer14() {
call void @inner14()
ret void
}
+
+define internal i32 @inner15() {
+; CHECK: @inner15(
+ ret i32 1
+}
+
+define i32 @outer15() {
+; CHECK-LABEL: @outer15(
+; CHECK: call
+
+ %r = call i32 @inner15() noinline
+ ret i32 %r
+}
+
+define internal i32 @inner16() alwaysinline {
+; CHECK: @inner16(
+ ret i32 1
+}
+
+define i32 @outer16() {
+; CHECK-LABEL: @outer16(
+; CHECK: call
+
+ %r = call i32 @inner16() noinline
+ ret i32 %r
+}
+
+define i32 @inner17() alwaysinline {
+; CHECK: @inner17(
+ ret i32 1
+}
+
+define i32 @outer17() {
+; CHECK-LABEL: @outer17(
+; CHECK: call
+
+ %r = call i32 @inner17() noinline
+ ret i32 %r
+}
+
+define i32 @inner18() noinline {
+; CHECK: @inner18(
+ ret i32 1
+}
+
+define i32 @outer18() {
+; CHECK-LABEL: @outer18(
+; CHECK-NOT: call
+; CHECK: ret
+
+ %r = call i32 @inner18() alwaysinline
+
+ ret i32 %r
+}
diff --git a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
index 6992c88799f0..537fe3cc8d9d 100644
--- a/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
+++ b/llvm/test/Transforms/LoopDistribute/symbolic-stride.ll
@@ -1,8 +1,9 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -basic-aa -loop-distribute -enable-loop-distribute -S < %s | \
-; RUN: FileCheck %s --check-prefix=ALL --check-prefix=STRIDE_SPEC
+; RUN: FileCheck %s --check-prefix=DEFAULT
; RUN: opt -basic-aa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | \
-; RUN: FileCheck %s --check-prefix=ALL --check-prefix=NO_STRIDE_SPEC
+; RUN: FileCheck %s --check-prefix=NO-VERSION
; If we don't speculate stride for 1 we can't distribute along the line
; because we could have a backward dependence:
@@ -16,19 +17,107 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
target triple = "x86_64-apple-macosx10.10.0"
-; ALL-LABEL: @f(
define void @f(i32* noalias %a,
- i32* noalias %b,
- i32* noalias %c,
- i32* noalias %d,
- i64 %stride) {
+;
+;
+; DEFAULT-LABEL: @f(
+; DEFAULT-NEXT: entry:
+; DEFAULT-NEXT: br label [[FOR_BODY_LVER_CHECK:%.*]]
+; DEFAULT: for.body.lver.check:
+; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
+; DEFAULT-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
+; DEFAULT: for.body.ph.lver.orig:
+; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; DEFAULT: for.body.lver.orig:
+; DEFAULT-NEXT: [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; DEFAULT-NEXT: [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
+; DEFAULT-NEXT: [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
+; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[IND_LVER_ORIG]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOADSTRIDEDA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADSTRIDEDA_LVER_ORIG]]
+; DEFAULT-NEXT: [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IND_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], 20
+; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; DEFAULT: for.body.ph.ldist1:
+; DEFAULT-NEXT: br label [[FOR_BODY_LDIST1:%.*]]
+; DEFAULT: for.body.ldist1:
+; DEFAULT-NEXT: [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
+; DEFAULT-NEXT: [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[IND_LDIST1]]
+; DEFAULT-NEXT: [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4
+; DEFAULT-NEXT: [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[IND_LDIST1]]
+; DEFAULT-NEXT: [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
+; DEFAULT-NEXT: [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
+; DEFAULT-NEXT: [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
+; DEFAULT-NEXT: [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
+; DEFAULT-NEXT: store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
+; DEFAULT-NEXT: [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], 20
+; DEFAULT-NEXT: br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
+; DEFAULT: for.body.ph:
+; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+; DEFAULT: for.body:
+; DEFAULT-NEXT: [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
+; DEFAULT-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[IND]]
+; DEFAULT-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA]], align 4
+; DEFAULT-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
+; DEFAULT-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[IND]]
+; DEFAULT-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
+; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
+; DEFAULT: for.end.loopexit:
+; DEFAULT-NEXT: br label [[FOR_END:%.*]]
+; DEFAULT: for.end.loopexit1:
+; DEFAULT-NEXT: br label [[FOR_END]]
+; DEFAULT: for.end:
+; DEFAULT-NEXT: ret void
+;
+; NO-VERSION-LABEL: @f(
+; NO-VERSION-NEXT: entry:
+; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VERSION: for.body:
+; NO-VERSION-NEXT: [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; NO-VERSION-NEXT: [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
+; NO-VERSION-NEXT: [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
+; NO-VERSION-NEXT: [[ADD]] = add nuw nsw i64 [[IND]], 1
+; NO-VERSION-NEXT: [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
+; NO-VERSION-NEXT: store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
+; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[IND]], [[STRIDE:%.*]]
+; NO-VERSION-NEXT: [[ARRAYIDXSTRIDEDA:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOADSTRIDEDA:%.*]] = load i32, i32* [[ARRAYIDXSTRIDEDA]], align 4
+; NO-VERSION-NEXT: [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADSTRIDEDA]]
+; NO-VERSION-NEXT: [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[IND]]
+; NO-VERSION-NEXT: store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
+; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], 20
+; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NO-VERSION: for.end:
+; NO-VERSION-NEXT: ret void
+;
+ i32* noalias %b,
+ i32* noalias %c,
+ i32* noalias %d,
+ i64 %stride) {
entry:
br label %for.body
-; STRIDE_SPEC: %ident.check = icmp ne i64 %stride, 1
-; STRIDE_SPEC: for.body.ldist1:
-; NO_STRIDE_SPEC-NOT: for.body.ldist1:
for.body: ; preds = %for.body, %entry
%ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
diff --git a/llvm/test/Transforms/LoopLoadElim/symbolic-stride.ll b/llvm/test/Transforms/LoopLoadElim/symbolic-stride.ll
index dce61157aae1..8165190fa10c 100644
--- a/llvm/test/Transforms/LoopLoadElim/symbolic-stride.ll
+++ b/llvm/test/Transforms/LoopLoadElim/symbolic-stride.ll
@@ -1,14 +1,12 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -loop-load-elim -S < %s | \
-; RUN: FileCheck %s -check-prefix=ALL -check-prefix=ONE_STRIDE_SPEC \
-; RUN: -check-prefix=TWO_STRIDE_SPEC
+; RUN: FileCheck %s -check-prefix=DEFAULT
; RUN: opt -loop-load-elim -S -enable-mem-access-versioning=0 < %s | \
-; RUN: FileCheck %s -check-prefix=ALL -check-prefix=NO_ONE_STRIDE_SPEC \
-; RUN: -check-prefix=NO_TWO_STRIDE_SPEC
+; RUN: FileCheck %s -check-prefix=NO-VERSION
; RUN: opt -loop-load-elim -S -loop-load-elimination-scev-check-threshold=1 < %s | \
-; RUN: FileCheck %s -check-prefix=ALL -check-prefix=ONE_STRIDE_SPEC \
-; RUN: -check-prefix=NO_TWO_STRIDE_SPEC
+; RUN: FileCheck %s -check-prefix=THRESHOLD
; Forwarding in the presence of symbolic strides:
;
@@ -17,28 +15,130 @@
target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"
-; ALL-LABEL: @f(
define void @f(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N,
- i64 %stride) {
+;
+;
+;
+;
+;
+; DEFAULT-LABEL: @f(
+; DEFAULT-NEXT: for.body.lver.check:
+; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
+; DEFAULT-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; DEFAULT: for.body.ph.lver.orig:
+; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; DEFAULT: for.body.lver.orig:
+; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
+; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; DEFAULT: for.body.ph:
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+; DEFAULT: for.body:
+; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; DEFAULT-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
+; DEFAULT: for.end.loopexit:
+; DEFAULT-NEXT: br label [[FOR_END:%.*]]
+; DEFAULT: for.end.loopexit1:
+; DEFAULT-NEXT: br label [[FOR_END]]
+; DEFAULT: for.end:
+; DEFAULT-NEXT: ret void
+;
+; NO-VERSION-LABEL: @f(
+; NO-VERSION-NEXT: entry:
+; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VERSION: for.body:
+; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE:%.*]]
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
+; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; NO-VERSION-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NO-VERSION: for.end:
+; NO-VERSION-NEXT: ret void
+;
+; THRESHOLD-LABEL: @f(
+; THRESHOLD-NEXT: for.body.lver.check:
+; THRESHOLD-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
+; THRESHOLD-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; THRESHOLD: for.body.ph.lver.orig:
+; THRESHOLD-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; THRESHOLD: for.body.lver.orig:
+; THRESHOLD-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; THRESHOLD-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
+; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
+; THRESHOLD-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; THRESHOLD-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
+; THRESHOLD-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; THRESHOLD: for.body.ph:
+; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; THRESHOLD-NEXT: br label [[FOR_BODY:%.*]]
+; THRESHOLD: for.body:
+; THRESHOLD-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
+; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[INDVARS_IV_NEXT]]
+; THRESHOLD-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
+; THRESHOLD: for.end.loopexit:
+; THRESHOLD-NEXT: br label [[FOR_END:%.*]]
+; THRESHOLD: for.end.loopexit1:
+; THRESHOLD-NEXT: br label [[FOR_END]]
+; THRESHOLD: for.end:
+; THRESHOLD-NEXT: ret void
+;
+ i64 %stride) {
-; ONE_STRIDE_SPEC: %ident.check = icmp ne i64 %stride, 1
entry:
-; NO_ONE_STRIDE_SPEC-NOT: %load_initial = load i32, i32* %A
-; ONE_STRIDE_SPEC: %load_initial = load i32, i32* %A
br label %for.body
for.body: ; preds = %for.body, %entry
-; NO_ONE_STRIDE_SPEC-NOT: %store_forwarded = phi i32 [ %load_initial, {{.*}} ], [ %add, %for.body ]
-; ONE_STRIDE_SPEC: %store_forwarded = phi i32 [ %load_initial, {{.*}} ], [ %add, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
%load = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%load_1 = load i32, i32* %arrayidx2, align 4
-; NO_ONE_STRIDE_SPEC-NOT: %add = add i32 %load_1, %store_forwarded
-; ONE_STRIDE_SPEC: %add = add i32 %load_1, %store_forwarded
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%arrayidx_next = getelementptr inbounds i32, i32* %A, i64 %indvars.iv.next
@@ -51,20 +151,139 @@ for.end: ; preds = %for.body
}
; Similar to @f(), but with a struct type.
-; ALL-LABEL: @f_struct(
define void @f_struct({ i32, i8 } * noalias nocapture %A, { i32, i8 }* noalias nocapture readonly %B, i64 %N,
- i64 %stride) {
+;
+;
+;
+;
+;
+; DEFAULT-LABEL: @f_struct(
+; DEFAULT-NEXT: for.body.lver.check:
+; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
+; DEFAULT-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; DEFAULT: for.body.ph.lver.orig:
+; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; DEFAULT: for.body.lver.orig:
+; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[V1_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_LVER_ORIG]], 0
+; DEFAULT-NEXT: [[V2_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_1_LVER_ORIG]], 0
+; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[V1_LVER_ORIG]], [[V2_LVER_ORIG]]
+; DEFAULT-NEXT: [[INS_LVER_ORIG:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD_LVER_ORIG]], 0
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; DEFAULT-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], { i32, i8 }* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
+; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; DEFAULT: for.body.ph:
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, { i32, i8 }* [[A]], align 4
+; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+; DEFAULT: for.body:
+; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi { i32, i8 } [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[INS:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[STORE_FORWARDED]], 0
+; DEFAULT-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
+; DEFAULT-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
+; DEFAULT-NEXT: [[INS]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
+; DEFAULT-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
+; DEFAULT: for.end.loopexit:
+; DEFAULT-NEXT: br label [[FOR_END:%.*]]
+; DEFAULT: for.end.loopexit1:
+; DEFAULT-NEXT: br label [[FOR_END]]
+; DEFAULT: for.end:
+; DEFAULT-NEXT: ret void
+;
+; NO-VERSION-LABEL: @f_struct(
+; NO-VERSION-NEXT: entry:
+; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VERSION: for.body:
+; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE:%.*]]
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[LOAD]], 0
+; NO-VERSION-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
+; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
+; NO-VERSION-NEXT: [[INS:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
+; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
+; NO-VERSION-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NO-VERSION: for.end:
+; NO-VERSION-NEXT: ret void
+;
+; THRESHOLD-LABEL: @f_struct(
+; THRESHOLD-NEXT: for.body.lver.check:
+; THRESHOLD-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE:%.*]], 1
+; THRESHOLD-NEXT: br i1 [[IDENT_CHECK]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; THRESHOLD: for.body.ph.lver.orig:
+; THRESHOLD-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; THRESHOLD: for.body.lver.orig:
+; THRESHOLD-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; THRESHOLD-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE]]
+; THRESHOLD-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; THRESHOLD-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[V1_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_LVER_ORIG]], 0
+; THRESHOLD-NEXT: [[V2_LVER_ORIG:%.*]] = extractvalue { i32, i8 } [[LOAD_1_LVER_ORIG]], 0
+; THRESHOLD-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[V1_LVER_ORIG]], [[V2_LVER_ORIG]]
+; THRESHOLD-NEXT: [[INS_LVER_ORIG:%.*]] = insertvalue { i32, i8 } undef, i32 [[ADD_LVER_ORIG]], 0
+; THRESHOLD-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT_LVER_ORIG]]
+; THRESHOLD-NEXT: store { i32, i8 } [[INS_LVER_ORIG]], { i32, i8 }* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; THRESHOLD-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
+; THRESHOLD-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; THRESHOLD: for.body.ph:
+; THRESHOLD-NEXT: [[LOAD_INITIAL:%.*]] = load { i32, i8 }, { i32, i8 }* [[A]], align 4
+; THRESHOLD-NEXT: br label [[FOR_BODY:%.*]]
+; THRESHOLD: for.body:
+; THRESHOLD-NEXT: [[STORE_FORWARDED:%.*]] = phi { i32, i8 } [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[INS:%.*]], [[FOR_BODY]] ]
+; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE]]
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[B]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load { i32, i8 }, { i32, i8 }* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[V1:%.*]] = extractvalue { i32, i8 } [[STORE_FORWARDED]], 0
+; THRESHOLD-NEXT: [[V2:%.*]] = extractvalue { i32, i8 } [[LOAD_1]], 0
+; THRESHOLD-NEXT: [[ADD:%.*]] = add i32 [[V1]], [[V2]]
+; THRESHOLD-NEXT: [[INS]] = insertvalue { i32, i8 } undef, i32 [[ADD]], 0
+; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds { i32, i8 }, { i32, i8 }* [[A]], i64 [[INDVARS_IV_NEXT]]
+; THRESHOLD-NEXT: store { i32, i8 } [[INS]], { i32, i8 }* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT1:%.*]], label [[FOR_BODY]]
+; THRESHOLD: for.end.loopexit:
+; THRESHOLD-NEXT: br label [[FOR_END:%.*]]
+; THRESHOLD: for.end.loopexit1:
+; THRESHOLD-NEXT: br label [[FOR_END]]
+; THRESHOLD: for.end:
+; THRESHOLD-NEXT: ret void
+;
+ i64 %stride) {
-; ONE_STRIDE_SPEC: %ident.check = icmp ne i64 %stride, 1
entry:
-; NO_ONE_STRIDE_SPEC-NOT: %load_initial = load { i32, i8 }, { i32, i8 }* %A
-; ONE_STRIDE_SPEC: %load_initial = load { i32, i8 }, { i32, i8 }* %A
br label %for.body
for.body: ; preds = %for.body, %entry
-; NO_ONE_STRIDE_SPEC-NOT: %store_forwarded = phi { i32, i8 } [ %load_initial, {{.*}} ], [ %ins, %for.body ]
-; ONE_STRIDE_SPEC: %store_forwarded = phi { i32, i8 } [ %load_initial, {{.*}} ], [ %ins, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride
%arrayidx = getelementptr inbounds { i32, i8 }, { i32, i8 }* %A, i64 %mul
@@ -72,9 +291,6 @@ for.body: ; preds = %for.body, %entry
%arrayidx2 = getelementptr inbounds { i32, i8 }, { i32, i8 }* %B, i64 %indvars.iv
%load_1 = load { i32, i8 }, { i32, i8 }* %arrayidx2, align 4
-; NO_ONE_STRIDE_SPEC-NOT: %v1 = extractvalue { i32, i8 } %store_forwarded
-; ONE_STRIDE_SPEC: %v1 = extractvalue { i32, i8 } %store_forwarded
-; ONE_STRIDE_SPEC: %add = add i32 %v1, %v2
%v1 = extractvalue { i32, i8 } %load, 0
%v2 = extractvalue { i32, i8} %load_1, 0
@@ -95,30 +311,113 @@ for.end: ; preds = %for.body
; for (unsigned i = 0; i < 100; i++)
; A[Stride2 * (i + 1)] = A[Stride1 * i] + B[i];
-; ALL-LABEL: @two_strides(
define void @two_strides(i32* noalias nocapture %A, i32* noalias nocapture readonly %B, i64 %N,
- i64 %stride.1, i64 %stride.2) {
+;
+;
+;
+;
+;
+;
+; DEFAULT-LABEL: @two_strides(
+; DEFAULT-NEXT: for.body.lver.check:
+; DEFAULT-NEXT: [[IDENT_CHECK:%.*]] = icmp ne i64 [[STRIDE_2:%.*]], 1
+; DEFAULT-NEXT: [[IDENT_CHECK1:%.*]] = icmp ne i64 [[STRIDE_1:%.*]], 1
+; DEFAULT-NEXT: [[TMP0:%.*]] = or i1 [[IDENT_CHECK]], [[IDENT_CHECK1]]
+; DEFAULT-NEXT: br i1 [[TMP0]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH:%.*]]
+; DEFAULT: for.body.ph.lver.orig:
+; DEFAULT-NEXT: br label [[FOR_BODY_LVER_ORIG:%.*]]
+; DEFAULT: for.body.lver.orig:
+; DEFAULT-NEXT: [[INDVARS_IV_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INDVARS_IV_NEXT_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
+; DEFAULT-NEXT: [[MUL_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_LVER_ORIG]], [[STRIDE_1]]
+; DEFAULT-NEXT: [[ARRAYIDX_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV_LVER_ORIG]]
+; DEFAULT-NEXT: [[LOAD_1_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDX2_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[ADD_LVER_ORIG:%.*]] = add i32 [[LOAD_1_LVER_ORIG]], [[LOAD_LVER_ORIG]]
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT_LVER_ORIG]] = add nuw nsw i64 [[INDVARS_IV_LVER_ORIG]], 1
+; DEFAULT-NEXT: [[MUL_2_LVER_ORIG:%.*]] = mul i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[STRIDE_2]]
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2_LVER_ORIG]]
+; DEFAULT-NEXT: store i32 [[ADD_LVER_ORIG]], i32* [[ARRAYIDX_NEXT_LVER_ORIG]], align 4
+; DEFAULT-NEXT: [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT_LVER_ORIG]], [[N:%.*]]
+; DEFAULT-NEXT: br i1 [[EXITCOND_LVER_ORIG]], label [[FOR_END_LOOPEXIT:%.*]], label [[FOR_BODY_LVER_ORIG]]
+; DEFAULT: for.body.ph:
+; DEFAULT-NEXT: [[LOAD_INITIAL:%.*]] = load i32, i32* [[A]], align 4
+; DEFAULT-NEXT: br label [[FOR_BODY:%.*]]
+; DEFAULT: for.body:
+; DEFAULT-NEXT: [[STORE_FORWARDED:%.*]] = phi i32 [ [[LOAD_INITIAL]], [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; DEFAULT-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1]]
+; DEFAULT-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL]]
+; DEFAULT-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; DEFAULT-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[INDVARS_IV]]
+; DEFAULT-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; DEFAULT-NEXT: [[ADD]] = add i32 [[LOAD_1]], [[STORE_FORWARDED]]
+; DEFAULT-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; DEFAULT-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2]]
+; DEFAULT-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
+; DEFAULT-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; DEFAULT-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N]]
+; DEFAULT-NEXT: br i1 [[EXITCOND]], label [[FOR_END_LOOPEXIT2:%.*]], label [[FOR_BODY]]
+; DEFAULT: for.end.loopexit:
+; DEFAULT-NEXT: br label [[FOR_END:%.*]]
+; DEFAULT: for.end.loopexit2:
+; DEFAULT-NEXT: br label [[FOR_END]]
+; DEFAULT: for.end:
+; DEFAULT-NEXT: ret void
+;
+; NO-VERSION-LABEL: @two_strides(
+; NO-VERSION-NEXT: entry:
+; NO-VERSION-NEXT: br label [[FOR_BODY:%.*]]
+; NO-VERSION: for.body:
+; NO-VERSION-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; NO-VERSION-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1:%.*]]
+; NO-VERSION-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
+; NO-VERSION-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; NO-VERSION-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; NO-VERSION-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; NO-VERSION-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
+; NO-VERSION-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; NO-VERSION-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2:%.*]]
+; NO-VERSION-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
+; NO-VERSION-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; NO-VERSION-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; NO-VERSION-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; NO-VERSION: for.end:
+; NO-VERSION-NEXT: ret void
+;
+; THRESHOLD-LABEL: @two_strides(
+; THRESHOLD-NEXT: entry:
+; THRESHOLD-NEXT: br label [[FOR_BODY:%.*]]
+; THRESHOLD: for.body:
+; THRESHOLD-NEXT: [[INDVARS_IV:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[INDVARS_IV_NEXT:%.*]], [[FOR_BODY]] ]
+; THRESHOLD-NEXT: [[MUL:%.*]] = mul i64 [[INDVARS_IV]], [[STRIDE_1:%.*]]
+; THRESHOLD-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL]]
+; THRESHOLD-NEXT: [[LOAD:%.*]] = load i32, i32* [[ARRAYIDX]], align 4
+; THRESHOLD-NEXT: [[ARRAYIDX2:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[INDVARS_IV]]
+; THRESHOLD-NEXT: [[LOAD_1:%.*]] = load i32, i32* [[ARRAYIDX2]], align 4
+; THRESHOLD-NEXT: [[ADD:%.*]] = add i32 [[LOAD_1]], [[LOAD]]
+; THRESHOLD-NEXT: [[INDVARS_IV_NEXT]] = add nuw nsw i64 [[INDVARS_IV]], 1
+; THRESHOLD-NEXT: [[MUL_2:%.*]] = mul i64 [[INDVARS_IV_NEXT]], [[STRIDE_2:%.*]]
+; THRESHOLD-NEXT: [[ARRAYIDX_NEXT:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_2]]
+; THRESHOLD-NEXT: store i32 [[ADD]], i32* [[ARRAYIDX_NEXT]], align 4
+; THRESHOLD-NEXT: [[EXITCOND:%.*]] = icmp eq i64 [[INDVARS_IV_NEXT]], [[N:%.*]]
+; THRESHOLD-NEXT: br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
+; THRESHOLD: for.end:
+; THRESHOLD-NEXT: ret void
+;
+ i64 %stride.1, i64 %stride.2) {
-; TWO_STRIDE_SPEC: %ident.check = icmp ne i64 %stride.2, 1
-; TWO_STRIDE_SPEC: %ident.check1 = icmp ne i64 %stride.1, 1
-; NO_TWO_STRIDE_SPEC-NOT: %ident.check{{.*}} = icmp ne i64 %stride{{.*}}, 1
entry:
-; NO_TWO_STRIDE_SPEC-NOT: %load_initial = load i32, i32* %A
-; TWO_STRIDE_SPEC: %load_initial = load i32, i32* %A
br label %for.body
for.body: ; preds = %for.body, %entry
-; NO_TWO_STRIDE_SPEC-NOT: %store_forwarded = phi i32 [ %load_initial, {{.*}} ], [ %add, %for.body ]
-; TWO_STRIDE_SPEC: %store_forwarded = phi i32 [ %load_initial, {{.*}} ], [ %add, %for.body ]
%indvars.iv = phi i64 [ 0, %entry ], [ %indvars.iv.next, %for.body ]
%mul = mul i64 %indvars.iv, %stride.1
%arrayidx = getelementptr inbounds i32, i32* %A, i64 %mul
%load = load i32, i32* %arrayidx, align 4
%arrayidx2 = getelementptr inbounds i32, i32* %B, i64 %indvars.iv
%load_1 = load i32, i32* %arrayidx2, align 4
-; NO_TWO_STRIDE_SPEC-NOT: %add = add i32 %load_1, %store_forwarded
-; TWO_STRIDE_SPEC: %add = add i32 %load_1, %store_forwarded
%add = add i32 %load_1, %load
%indvars.iv.next = add nuw nsw i64 %indvars.iv, 1
%mul.2 = mul i64 %indvars.iv.next, %stride.2
diff --git a/llvm/test/Transforms/OpenMP/barrier_removal.ll b/llvm/test/Transforms/OpenMP/barrier_removal.ll
index 8c7cca8053e9..7de421f728f1 100644
--- a/llvm/test/Transforms/OpenMP/barrier_removal.ll
+++ b/llvm/test/Transforms/OpenMP/barrier_removal.ll
@@ -66,8 +66,9 @@ define void @pos_empty_6() {
call i32 @llvm.nvvm.barrier0.popc(i32 0)
ret void
}
-define void @pos_empty_7() {
-; CHECK-LABEL: define {{[^@]+}}@pos_empty_7() {
+define void @neg_empty_7() {
+; CHECK-LABEL: define {{[^@]+}}@neg_empty_7() {
+; CHECK-NEXT: call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: ret void
;
call void @llvm.amdgcn.s.barrier()
@@ -211,6 +212,7 @@ define void @neg_mem() {
define void @pos_multiple() {
; CHECK-LABEL: define {{[^@]+}}@pos_multiple() {
+; CHECK-NEXT: call void @llvm.amdgcn.s.barrier()
; CHECK-NEXT: ret void
;
call void @llvm.nvvm.barrier0()
@@ -233,7 +235,7 @@ define void @pos_multiple() {
!3 = !{void ()* @pos_empty_4, !"kernel", i32 1}
!4 = !{void ()* @pos_empty_5, !"kernel", i32 1}
!5 = !{void ()* @pos_empty_6, !"kernel", i32 1}
-!6 = !{void ()* @pos_empty_7, !"kernel", i32 1}
+!6 = !{void ()* @neg_empty_7, !"kernel", i32 1}
!7 = !{void ()* @pos_constant_loads, !"kernel", i32 1}
!8 = !{void ()* @neg_loads, !"kernel", i32 1}
!9 = !{void ()* @pos_priv_mem, !"kernel", i32 1}
@@ -254,7 +256,7 @@ define void @pos_multiple() {
; CHECK: [[META5:![0-9]+]] = !{void ()* @pos_empty_4, !"kernel", i32 1}
; CHECK: [[META6:![0-9]+]] = !{void ()* @pos_empty_5, !"kernel", i32 1}
; CHECK: [[META7:![0-9]+]] = !{void ()* @pos_empty_6, !"kernel", i32 1}
-; CHECK: [[META8:![0-9]+]] = !{void ()* @pos_empty_7, !"kernel", i32 1}
+; CHECK: [[META8:![0-9]+]] = !{void ()* @neg_empty_7, !"kernel", i32 1}
; CHECK: [[META9:![0-9]+]] = !{void ()* @pos_constant_loads, !"kernel", i32 1}
; CHECK: [[META10:![0-9]+]] = !{void ()* @neg_loads, !"kernel", i32 1}
; CHECK: [[META11:![0-9]+]] = !{void ()* @pos_priv_mem, !"kernel", i32 1}
diff --git a/llvm/test/Verifier/llvm.fptrunc.round.ll b/llvm/test/Verifier/llvm.fptrunc.round.ll
new file mode 100644
index 000000000000..17ca81a77d69
--- /dev/null
+++ b/llvm/test/Verifier/llvm.fptrunc.round.ll
@@ -0,0 +1,13 @@
+; RUN: not opt -verify < %s 2>&1 | FileCheck %s
+
+declare half @llvm.fptrunc.round(float, metadata)
+
+define void @test_fptrunc_round_dynamic(float %a) {
+; CHECK: unsupported rounding mode argument
+ %res = call half @llvm.fptrunc.round(float %a, metadata !"round.dynamic")
+; CHECK: unsupported rounding mode argument
+ %res1 = call half @llvm.fptrunc.round(float %a, metadata !"round.test")
+; CHECK: invalid value for llvm.fptrunc.round metadata operand (the operand should be a string)
+ %res2 = call half @llvm.fptrunc.round(float %a, metadata i32 5)
+ ret void
+}
diff --git a/llvm/test/tools/dsymutil/Inputs/reflection_metadata.yaml b/llvm/test/tools/dsymutil/Inputs/reflection_metadata.yaml
index 791b914eaf73..964de15ce1ae 100644
--- a/llvm/test/tools/dsymutil/Inputs/reflection_metadata.yaml
+++ b/llvm/test/tools/dsymutil/Inputs/reflection_metadata.yaml
@@ -16,12 +16,12 @@ FileHeader:
cpusubtype: 0x3
filetype: 0x1
ncmds: 8
- sizeofcmds: 2960
+ sizeofcmds: 3040
flags: 0x2000
reserved: 0x0
LoadCommands:
- cmd: LC_SEGMENT_64
- cmdsize: 2712
+ cmdsize: 2792
segname: ''
vmaddr: 0
vmsize: 21352
@@ -36,7 +36,7 @@ LoadCommands:
segname: __TEXT
addr: 0x0
size: 4571
- offset: 0xBB0
+ offset: 0xC00
align: 4
reloff: 0x5CF8
nreloc: 74
@@ -56,7 +56,7 @@ LoadCommands:
segname: __TEXT
addr: 0x11DC
size: 117
- offset: 0x1D8C
+ offset: 0x1DDC
align: 1
reloff: 0x5F48
nreloc: 22
@@ -77,7 +77,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1254
size: 24
- offset: 0x1E04
+ offset: 0x1E54
align: 2
reloff: 0x5FF8
nreloc: 6
@@ -98,7 +98,7 @@ LoadCommands:
segname: __TEXT
addr: 0x17D8
size: 37
- offset: 0x2388
+ offset: 0x23D8
align: 0
reloff: 0x0
nreloc: 0
@@ -110,7 +110,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1800
size: 24
- offset: 0x23B0
+ offset: 0x2400
align: 2
reloff: 0x6530
nreloc: 8
@@ -131,7 +131,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1818
size: 260
- offset: 0x23C8
+ offset: 0x2418
align: 2
reloff: 0x6570
nreloc: 60
@@ -152,7 +152,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1AC8
size: 20
- offset: 0x2678
+ offset: 0x26C8
align: 2
reloff: 0x67F8
nreloc: 2
@@ -173,7 +173,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1AEC
size: 10
- offset: 0x269C
+ offset: 0x26EC
align: 2
reloff: 0x0
nreloc: 0
@@ -185,7 +185,7 @@ LoadCommands:
segname: __TEXT
addr: 0x1AF8
size: 10
- offset: 0x26C0
+ offset: 0x2710
align: 2
reloff: 0x0
nreloc: 0
@@ -193,11 +193,23 @@ LoadCommands:
reserved1: 0x0
reserved2: 0x0
content: 51525354555657585960
+ - sectname: __swift5_acfuncs
+ segname: __TEXT
+ addr: 0x1B04
+ size: 10
+ offset: 0x2734
+ align: 2
+ reloff: 0x0
+ nreloc: 0
+ flags: 0x10000000
+ reserved1: 0x0
+ reserved2: 0x0
+ content: 61626364656667686970
- sectname: __bss
segname: __DATA
addr: 0x3372
size: 2084
- offset: 0x5150
+ offset: 0x51D0
align: 3
reloff: 0x0
nreloc: 0
diff --git a/llvm/test/tools/dsymutil/X86/reflection-dump.test b/llvm/test/tools/dsymutil/X86/reflection-dump.test
index f3daa75adbb6..e94af7e54582 100644
--- a/llvm/test/tools/dsymutil/X86/reflection-dump.test
+++ b/llvm/test/tools/dsymutil/X86/reflection-dump.test
@@ -48,3 +48,6 @@ CHECK-NEXT: 10000e258 41424344 45464748 4950 ABCDEFGHIP
CHECK: Contents of section __DWARF,__swift5_protos:
CHECK-NEXT: 10000e264 51525354 55565758 5960 QRSTUVWXY`
+
+CHECK: Contents of section __DWARF,__swift5_acfuncs:
+CHECK-NEXT: 10000e270 61626364 65666768 6970 abcdefghip
diff --git a/llvm/test/tools/llvm-profdata/cs-sample-nested-profile.test b/llvm/test/tools/llvm-profdata/cs-sample-nested-profile.test
index fe28f57676b0..03d15671f1fd 100644
--- a/llvm/test/tools/llvm-profdata/cs-sample-nested-profile.test
+++ b/llvm/test/tools/llvm-profdata/cs-sample-nested-profile.test
@@ -6,8 +6,12 @@ RUN: llvm-profdata merge --sample --extbinary -output=%t.profbin %S/Inputs/cs-sa
RUN: llvm-profdata merge --sample --text -output=%t2.proftext %t.profbin
RUN: FileCheck %s < %t2.proftext --match-full-lines --strict-whitespace
RUN: llvm-profdata show --sample -show-sec-info-only %t.profbin | FileCheck %s -check-prefix=PREINLINE
-RUN: llvm-profdata merge --sample --text -output=%t.proftext %S/Inputs/cs-sample-preinline.proftext --gen-cs-nested-profile=1 -generate-merged-base-profiles=1
-RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-prefix=RECOUNT
+RUN: llvm-profdata merge --sample --text -output=%t3.proftext %S/Inputs/cs-sample-preinline.proftext --gen-cs-nested-profile=1 -generate-merged-base-profiles=1
+RUN: FileCheck %s < %t3.proftext --match-full-lines --strict-whitespace -check-prefix=RECOUNT
+RUN: llvm-profdata merge --sample --extbinary -output=%t2.profbin %S/Inputs/cs-sample-preinline.proftext --gen-cs-nested-profile=1 -generate-merged-base-profiles=1
+RUN: llvm-profdata show -sample -detailed-summary %S/Inputs/cs-sample-preinline.proftext | FileCheck %s -check-prefix=SUMMARY
+RUN: llvm-profdata show -sample -detailed-summary %t2.profbin | FileCheck %s -check-prefix=SUMMARY
+RUN: llvm-profdata show -sample -detailed-summary %t3.proftext | FileCheck %s -check-prefix=SUMMARY
; CHECK:main:1968679:12
@@ -60,8 +64,8 @@ RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-pr
; RECOUNT-NEXT: 3: 287884
; RECOUNT-NEXT: 4: 287864 _Z3fibi:315608
; RECOUNT-NEXT: 15: 23
-; RECOUNT-NEXT: !Attributes: 2
-; RECOUNT-NEXT: !Attributes: 2
+; RECOUNT-NEXT: !Attributes: 6
+; RECOUNT-NEXT: !Attributes: 6
; RECOUNT-NEXT: 3.1: _Z5funcBi:500973
; RECOUNT-NEXT: 0: 19
; RECOUNT-NEXT: 1: 19 _Z8funcLeafi:20
@@ -74,8 +78,8 @@ RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-pr
; RECOUNT-NEXT: 10: 23324
; RECOUNT-NEXT: 11: 23327 _Z3fibi:25228
; RECOUNT-NEXT: 15: 11
-; RECOUNT-NEXT: !Attributes: 2
-; RECOUNT-NEXT: !Attributes: 2
+; RECOUNT-NEXT: !Attributes: 6
+; RECOUNT-NEXT: !Attributes: 6
; RECOUNT-NEXT:_Z8funcLeafi:1968152:31
; RECOUNT-NEXT: 0: 21
; RECOUNT-NEXT: 1: 21
@@ -95,7 +99,7 @@ RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-pr
; RECOUNT-NEXT: 3: 287884
; RECOUNT-NEXT: 4: 287864 _Z3fibi:315608
; RECOUNT-NEXT: 15: 23
-; RECOUNT-NEXT: !Attributes: 2
+; RECOUNT-NEXT: !Attributes: 6
; RECOUNT-NEXT: !Attributes: 2
; RECOUNT-NEXT:_Z5funcBi:501213:32
; RECOUNT-NEXT: 0: 32
@@ -109,7 +113,7 @@ RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-pr
; RECOUNT-NEXT: 10: 23324
; RECOUNT-NEXT: 11: 23327 _Z3fibi:25228
; RECOUNT-NEXT: 15: 11
-; RECOUNT-NEXT: !Attributes: 2
+; RECOUNT-NEXT: !Attributes: 6
; PROBE:main:1968679:12
; PROBE-NEXT: 2: 24
@@ -153,3 +157,27 @@ RUN: FileCheck %s < %t.proftext --match-full-lines --strict-whitespace -check-pr
; PREINLINE: ProfileSummarySection {{.*}} Flags: {context-nested}
+
+
+; SUMMARY: Total functions: 4
+; SUMMARY-NEXT: Maximum function count: 32
+; SUMMARY-NEXT: Maximum block count: 362830
+; SUMMARY-NEXT: Total number of blocks: 16
+; SUMMARY-NEXT: Total count: 772562
+; SUMMARY-NEXT: Detailed summary:
+; SUMMARY-NEXT: 1 blocks with count >= 362830 account for 1 percentage of the total counts.
+; SUMMARY-NEXT: 1 blocks with count >= 362830 account for 10 percentage of the total counts.
+; SUMMARY-NEXT: 1 blocks with count >= 362830 account for 20 percentage of the total counts.
+; SUMMARY-NEXT: 1 blocks with count >= 362830 account for 30 percentage of the total counts.
+; SUMMARY-NEXT: 1 blocks with count >= 362830 account for 40 percentage of the total counts.
+; SUMMARY-NEXT: 2 blocks with count >= 362805 account for 50 percentage of the total counts.
+; SUMMARY-NEXT: 2 blocks with count >= 362805 account for 60 percentage of the total counts.
+; SUMMARY-NEXT: 2 blocks with count >= 362805 account for 70 percentage of the total counts.
+; SUMMARY-NEXT: 2 blocks with count >= 362805 account for 80 percentage of the total counts.
+; SUMMARY-NEXT: 2 blocks with count >= 362805 account for 90 percentage of the total counts.
+; SUMMARY-NEXT: 3 blocks with count >= 23327 account for 95 percentage of the total counts.
+; SUMMARY-NEXT: 4 blocks with count >= 23324 account for 99 percentage of the total counts.
+; SUMMARY-NEXT: 4 blocks with count >= 23324 account for 99.9 percentage of the total counts.
+; SUMMARY-NEXT: 11 blocks with count >= 24 account for 99.99 percentage of the total counts.
+; SUMMARY-NEXT: 16 blocks with count >= 10 account for 99.999 percentage of the total counts.
+; SUMMARY-NEXT: 16 blocks with count >= 10 account for 99.9999 percentage of the total counts.
diff --git a/llvm/test/tools/llvm-profgen/cs-preinline.test b/llvm/test/tools/llvm-profgen/cs-preinline.test
index 2ade9cb0ce43..8afa811674fd 100644
--- a/llvm/test/tools/llvm-profgen/cs-preinline.test
+++ b/llvm/test/tools/llvm-profgen/cs-preinline.test
@@ -65,4 +65,4 @@
; CHECK-PREINL-NEST-NEXT: 65526: 14
; CHECK-PREINL-NEST-NEXT: 3.1: bar:84
; CHECK-PREINL-NEST-NEXT: 1: 14
-; CHECK-PREINL-NEST-NEXT: !Attributes: 3
+; CHECK-PREINL-NEST-NEXT: !Attributes: 7
diff --git a/llvm/unittests/IR/ConstantsTest.cpp b/llvm/unittests/IR/ConstantsTest.cpp
index faf8502b19df..98975dcf18c8 100644
--- a/llvm/unittests/IR/ConstantsTest.cpp
+++ b/llvm/unittests/IR/ConstantsTest.cpp
@@ -469,9 +469,11 @@ TEST(ConstantsTest, BuildConstantDataVectors) {
}
}
-TEST(ConstantsTest, BitcastToGEP) {
+void bitcastToGEPHelper(bool useOpaquePointers) {
LLVMContext Context;
std::unique_ptr<Module> M(new Module("MyModule", Context));
+ if (useOpaquePointers)
+ Context.enableOpaquePointers();
auto *i32 = Type::getInt32Ty(Context);
auto *U = StructType::create(Context, "Unsized");
@@ -490,6 +492,11 @@ TEST(ConstantsTest, BitcastToGEP) {
}
}
+TEST(ConstantsTest, BitcastToGEP) {
+ bitcastToGEPHelper(true);
+ bitcastToGEPHelper(false);
+}
+
bool foldFuncPtrAndConstToNull(LLVMContext &Context, Module *TheModule,
uint64_t AndValue,
MaybeAlign FunctionAlign = llvm::None) {
diff --git a/llvm/unittests/IR/DebugInfoTest.cpp b/llvm/unittests/IR/DebugInfoTest.cpp
index 8605fbbcbd40..524752168b09 100644
--- a/llvm/unittests/IR/DebugInfoTest.cpp
+++ b/llvm/unittests/IR/DebugInfoTest.cpp
@@ -247,6 +247,45 @@ TEST(DIBuilder, CreateSetType) {
EXPECT_TRUE(isa_and_nonnull<DIDerivedType>(SetType));
}
+TEST(DIBuilder, CreateStringType) {
+ LLVMContext Ctx;
+ std::unique_ptr<Module> M(new Module("MyModule", Ctx));
+ DIBuilder DIB(*M);
+ DIScope *Scope = DISubprogram::getDistinct(
+ Ctx, nullptr, "", "", nullptr, 0, nullptr, 0, nullptr, 0, 0,
+ DINode::FlagZero, DISubprogram::SPFlagZero, nullptr);
+ DIFile *F = DIB.createFile("main.c", "/");
+ StringRef StrName = "string";
+ DIVariable *StringLen = DIB.createAutoVariable(Scope, StrName, F, 0, nullptr,
+ false, DINode::FlagZero, 0);
+ auto getDIExpression = [&DIB](int offset) {
+ SmallVector<uint64_t, 4> ops;
+ ops.push_back(llvm::dwarf::DW_OP_push_object_address);
+ DIExpression::appendOffset(ops, offset);
+ ops.push_back(llvm::dwarf::DW_OP_deref);
+
+ return DIB.createExpression(ops);
+ };
+ DIExpression *StringLocationExp = getDIExpression(1);
+ DIStringType *StringType =
+ DIB.createStringType(StrName, StringLen, StringLocationExp);
+
+ EXPECT_TRUE(isa_and_nonnull<DIStringType>(StringType));
+ EXPECT_EQ(StringType->getName(), StrName);
+ EXPECT_EQ(StringType->getStringLength(), StringLen);
+ EXPECT_EQ(StringType->getStringLocationExp(), StringLocationExp);
+
+ StringRef StrNameExp = "stringexp";
+ DIExpression *StringLengthExp = getDIExpression(2);
+ DIStringType *StringTypeExp =
+ DIB.createStringType(StrNameExp, StringLengthExp, StringLocationExp);
+
+ EXPECT_TRUE(isa_and_nonnull<DIStringType>(StringTypeExp));
+ EXPECT_EQ(StringTypeExp->getName(), StrNameExp);
+ EXPECT_EQ(StringTypeExp->getStringLocationExp(), StringLocationExp);
+ EXPECT_EQ(StringTypeExp->getStringLengthExp(), StringLengthExp);
+}
+
TEST(DIBuilder, DIEnumerator) {
LLVMContext Ctx;
std::unique_ptr<Module> M(new Module("MyModule", Ctx));
diff --git a/llvm/unittests/Support/CommandLineTest.cpp b/llvm/unittests/Support/CommandLineTest.cpp
index 2e007bfff187..9a7f4f38740f 100644
--- a/llvm/unittests/Support/CommandLineTest.cpp
+++ b/llvm/unittests/Support/CommandLineTest.cpp
@@ -743,21 +743,24 @@ TEST(CommandLineTest, ArgumentLimit) {
EXPECT_FALSE(llvm::sys::commandLineFitsWithinSystemLimits("cl", args.data()));
std::string args2(256, 'a');
EXPECT_TRUE(llvm::sys::commandLineFitsWithinSystemLimits("cl", args2.data()));
- if (Triple(sys::getProcessTriple()).isOSWindows()) {
- // We use 32000 as a limit for command line length. Program name ('cl'),
- // separating spaces and termination null character occupy 5 symbols.
- std::string long_arg(32000 - 5, 'b');
- EXPECT_TRUE(
- llvm::sys::commandLineFitsWithinSystemLimits("cl", long_arg.data()));
- long_arg += 'b';
- EXPECT_FALSE(
- llvm::sys::commandLineFitsWithinSystemLimits("cl", long_arg.data()));
- }
+}
+
+TEST(CommandLineTest, ArgumentLimitWindows) {
+ if (!Triple(sys::getProcessTriple()).isOSWindows())
+ GTEST_SKIP();
+ // We use 32000 as a limit for command line length. Program name ('cl'),
+ // separating spaces and termination null character occupy 5 symbols.
+ std::string long_arg(32000 - 5, 'b');
+ EXPECT_TRUE(
+ llvm::sys::commandLineFitsWithinSystemLimits("cl", long_arg.data()));
+ long_arg += 'b';
+ EXPECT_FALSE(
+ llvm::sys::commandLineFitsWithinSystemLimits("cl", long_arg.data()));
}
TEST(CommandLineTest, ResponseFileWindows) {
if (!Triple(sys::getProcessTriple()).isOSWindows())
- return;
+ GTEST_SKIP();
StackOption<std::string, cl::list<std::string>> InputFilenames(
cl::Positional, cl::desc("<input files>"), cl::ZeroOrMore);
diff --git a/llvm/unittests/Support/Host.cpp b/llvm/unittests/Support/Host.cpp
index 888cfb2658e4..0a0adcebc43d 100644
--- a/llvm/unittests/Support/Host.cpp
+++ b/llvm/unittests/Support/Host.cpp
@@ -47,13 +47,18 @@ protected:
HostTest() : Host(Triple::normalize(sys::getProcessTriple())) {}
};
-TEST_F(HostTest, NumPhysicalCores) {
+TEST_F(HostTest, NumPhysicalCoresSupported) {
+ if (!isSupportedArchAndOS())
+ GTEST_SKIP();
int Num = sys::getHostNumPhysicalCores();
+ ASSERT_GT(Num, 0);
+}
+TEST_F(HostTest, NumPhysicalCoresUnsupported) {
if (isSupportedArchAndOS())
- ASSERT_GT(Num, 0);
- else
- ASSERT_EQ(Num, -1);
+ GTEST_SKIP();
+ int Num = sys::getHostNumPhysicalCores();
+ ASSERT_EQ(Num, -1);
}
TEST(getLinuxHostCPUName, ARM) {
@@ -412,7 +417,7 @@ TEST_F(HostTest, DummyRunAndGetCommandOutputUse) {
TEST_F(HostTest, getMacOSHostVersion) {
llvm::Triple HostTriple(llvm::sys::getProcessTriple());
if (!HostTriple.isMacOSX())
- return;
+ GTEST_SKIP();
const char *SwVersPath = "/usr/bin/sw_vers";
StringRef argv[] = {SwVersPath, "-productVersion"};
@@ -441,14 +446,8 @@ TEST_F(HostTest, getMacOSHostVersion) {
}
}
-TEST_F(HostTest, AIXVersionDetect) {
- llvm::Triple HostTriple(llvm::sys::getProcessTriple());
- if (HostTriple.getOS() != Triple::AIX)
- return;
-
- llvm::Triple ConfiguredHostTriple(LLVM_HOST_TRIPLE);
- ASSERT_EQ(ConfiguredHostTriple.getOS(), Triple::AIX);
-
+// Helper to return AIX system version. Must return void to use ASSERT_*.
+static void getAIXSystemVersion(VersionTuple &SystemVersion) {
const char *ExePath = "/usr/bin/oslevel";
StringRef argv[] = {ExePath};
std::unique_ptr<char[]> Buffer;
@@ -456,28 +455,50 @@ TEST_F(HostTest, AIXVersionDetect) {
ASSERT_EQ(runAndGetCommandOutput(ExePath, argv, Buffer, Size), true);
StringRef SystemVersionStr = StringRef(Buffer.get(), Size).rtrim();
- VersionTuple SystemVersion =
+ SystemVersion =
llvm::Triple((Twine("powerpc-ibm-aix") + SystemVersionStr))
.getOSVersion();
+}
+
+TEST_F(HostTest, AIXHostVersionDetect) {
+ llvm::Triple HostTriple(llvm::sys::getProcessTriple());
+ if (HostTriple.getOS() != Triple::AIX)
+ GTEST_SKIP();
+
+ llvm::Triple ConfiguredHostTriple(LLVM_HOST_TRIPLE);
+ ASSERT_EQ(ConfiguredHostTriple.getOS(), Triple::AIX);
+
+ VersionTuple SystemVersion;
+ getAIXSystemVersion(SystemVersion);
// Ensure that the host triple version (major) and release (minor) numbers,
// unless explicitly configured, match with those of the current system.
- if (!ConfiguredHostTriple.getOSMajorVersion()) {
- VersionTuple HostVersion = HostTriple.getOSVersion();
- ASSERT_EQ(SystemVersion.getMajor(), HostVersion.getMajor());
- ASSERT_EQ(SystemVersion.getMinor(), HostVersion.getMinor());
+ auto SysMajor = SystemVersion.getMajor();
+ auto SysMinor = SystemVersion.getMinor();
+ VersionTuple HostVersion = HostTriple.getOSVersion();
+ if (ConfiguredHostTriple.getOSMajorVersion()) {
+ // Explicitly configured, force a match. We do it this way so the
+ // asserts are always executed.
+ SysMajor = HostVersion.getMajor();
+ SysMinor = HostVersion.getMinor();
}
+ ASSERT_EQ(SysMajor, HostVersion.getMajor());
+ ASSERT_EQ(SysMinor, HostVersion.getMinor());
+}
+TEST_F(HostTest, AIXTargetVersionDetect) {
llvm::Triple TargetTriple(llvm::sys::getDefaultTargetTriple());
if (TargetTriple.getOS() != Triple::AIX)
- return;
+ GTEST_SKIP();
// Ensure that the target triple version (major) and release (minor) numbers
// match with those of the current system.
llvm::Triple ConfiguredTargetTriple(LLVM_DEFAULT_TARGET_TRIPLE);
if (ConfiguredTargetTriple.getOSMajorVersion())
- return; // The version was configured explicitly; skip.
+ GTEST_SKIP(); // The version was configured explicitly; skip.
+ VersionTuple SystemVersion;
+ getAIXSystemVersion(SystemVersion);
VersionTuple TargetVersion = TargetTriple.getOSVersion();
ASSERT_EQ(SystemVersion.getMajor(), TargetVersion.getMajor());
ASSERT_EQ(SystemVersion.getMinor(), TargetVersion.getMinor());
@@ -486,7 +507,7 @@ TEST_F(HostTest, AIXVersionDetect) {
TEST_F(HostTest, AIXHostCPUDetect) {
llvm::Triple HostTriple(llvm::sys::getProcessTriple());
if (HostTriple.getOS() != Triple::AIX)
- return;
+ GTEST_SKIP();
// Return a value based on the current processor implementation mode.
const char *ExePath = "/usr/sbin/getsystype";
diff --git a/llvm/utils/TableGen/CMakeLists.txt b/llvm/utils/TableGen/CMakeLists.txt
index 97df6a55d1b5..339692bcd651 100644
--- a/llvm/utils/TableGen/CMakeLists.txt
+++ b/llvm/utils/TableGen/CMakeLists.txt
@@ -49,6 +49,7 @@ add_tablegen(llvm-tblgen LLVM
SubtargetFeatureInfo.cpp
TableGen.cpp
Types.cpp
+ VarLenCodeEmitterGen.cpp
X86DisassemblerTables.cpp
X86EVEX2VEXTablesEmitter.cpp
X86FoldTablesEmitter.cpp
diff --git a/llvm/utils/TableGen/CodeEmitterGen.cpp b/llvm/utils/TableGen/CodeEmitterGen.cpp
index fbac0d969917..f446e5fe4414 100644
--- a/llvm/utils/TableGen/CodeEmitterGen.cpp
+++ b/llvm/utils/TableGen/CodeEmitterGen.cpp
@@ -16,6 +16,7 @@
#include "CodeGenTarget.h"
#include "SubtargetFeatureInfo.h"
#include "Types.h"
+#include "VarLenCodeEmitterGen.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/StringExtras.h"
@@ -396,132 +397,140 @@ void CodeEmitterGen::run(raw_ostream &o) {
ArrayRef<const CodeGenInstruction*> NumberedInstructions =
Target.getInstructionsByEnumValue();
- const CodeGenHwModes &HWM = Target.getHwModes();
- // The set of HwModes used by instruction encodings.
- std::set<unsigned> HwModes;
- BitWidth = 0;
- for (const CodeGenInstruction *CGI : NumberedInstructions) {
- Record *R = CGI->TheDef;
- if (R->getValueAsString("Namespace") == "TargetOpcode" ||
- R->getValueAsBit("isPseudo"))
- continue;
+ if (any_of(NumberedInstructions, [](const CodeGenInstruction *CGI) {
+ Record *R = CGI->TheDef;
+ return R->getValue("Inst") && isa<DagInit>(R->getValueInit("Inst"));
+ })) {
+ emitVarLenCodeEmitter(Records, o);
+ } else {
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ // The set of HwModes used by instruction encodings.
+ std::set<unsigned> HwModes;
+ BitWidth = 0;
+ for (const CodeGenInstruction *CGI : NumberedInstructions) {
+ Record *R = CGI->TheDef;
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
- if (const RecordVal *RV = R->getValue("EncodingInfos")) {
- if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
- EncodingInfoByHwMode EBM(DI->getDef(), HWM);
- for (auto &KV : EBM) {
- BitsInit *BI = KV.second->getValueAsBitsInit("Inst");
- BitWidth = std::max(BitWidth, BI->getNumBits());
- HwModes.insert(KV.first);
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (DefInit *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ for (auto &KV : EBM) {
+ BitsInit *BI = KV.second->getValueAsBitsInit("Inst");
+ BitWidth = std::max(BitWidth, BI->getNumBits());
+ HwModes.insert(KV.first);
+ }
+ continue;
}
- continue;
}
+ BitsInit *BI = R->getValueAsBitsInit("Inst");
+ BitWidth = std::max(BitWidth, BI->getNumBits());
}
- BitsInit *BI = R->getValueAsBitsInit("Inst");
- BitWidth = std::max(BitWidth, BI->getNumBits());
- }
- UseAPInt = BitWidth > 64;
-
- // Emit function declaration
- if (UseAPInt) {
- o << "void " << Target.getName()
- << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
- << " SmallVectorImpl<MCFixup> &Fixups,\n"
- << " APInt &Inst,\n"
- << " APInt &Scratch,\n"
- << " const MCSubtargetInfo &STI) const {\n";
- } else {
- o << "uint64_t " << Target.getName();
- o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
- << " SmallVectorImpl<MCFixup> &Fixups,\n"
- << " const MCSubtargetInfo &STI) const {\n";
- }
-
- // Emit instruction base values
- if (HwModes.empty()) {
- emitInstructionBaseValues(o, NumberedInstructions, Target, -1);
- } else {
- for (unsigned HwMode : HwModes)
- emitInstructionBaseValues(o, NumberedInstructions, Target, (int)HwMode);
- }
+ UseAPInt = BitWidth > 64;
- if (!HwModes.empty()) {
- o << " const uint64_t *InstBits;\n";
- o << " unsigned HwMode = STI.getHwMode();\n";
- o << " switch (HwMode) {\n";
- o << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
- for (unsigned I : HwModes) {
- o << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
- << "; break;\n";
+ // Emit function declaration
+ if (UseAPInt) {
+ o << "void " << Target.getName()
+ << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+ << " SmallVectorImpl<MCFixup> &Fixups,\n"
+ << " APInt &Inst,\n"
+ << " APInt &Scratch,\n"
+ << " const MCSubtargetInfo &STI) const {\n";
+ } else {
+ o << "uint64_t " << Target.getName();
+ o << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+ << " SmallVectorImpl<MCFixup> &Fixups,\n"
+ << " const MCSubtargetInfo &STI) const {\n";
}
- o << " };\n";
- }
- // Map to accumulate all the cases.
- std::map<std::string, std::vector<std::string>> CaseMap;
+ // Emit instruction base values
+ if (HwModes.empty()) {
+ emitInstructionBaseValues(o, NumberedInstructions, Target, -1);
+ } else {
+ for (unsigned HwMode : HwModes)
+ emitInstructionBaseValues(o, NumberedInstructions, Target, (int)HwMode);
+ }
- // Construct all cases statement for each opcode
- for (Record *R : Insts) {
- if (R->getValueAsString("Namespace") == "TargetOpcode" ||
- R->getValueAsBit("isPseudo"))
- continue;
- std::string InstName =
- (R->getValueAsString("Namespace") + "::" + R->getName()).str();
- std::string Case = getInstructionCase(R, Target);
+ if (!HwModes.empty()) {
+ o << " const uint64_t *InstBits;\n";
+ o << " unsigned HwMode = STI.getHwMode();\n";
+ o << " switch (HwMode) {\n";
+ o << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
+ for (unsigned I : HwModes) {
+ o << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
+ << "; break;\n";
+ }
+ o << " };\n";
+ }
- CaseMap[Case].push_back(std::move(InstName));
- }
+ // Map to accumulate all the cases.
+ std::map<std::string, std::vector<std::string>> CaseMap;
- // Emit initial function code
- if (UseAPInt) {
- int NumWords = APInt::getNumWords(BitWidth);
- int NumBytes = (BitWidth + 7) / 8;
- o << " const unsigned opcode = MI.getOpcode();\n"
- << " if (Inst.getBitWidth() != " << BitWidth << ")\n"
- << " Inst = Inst.zext(" << BitWidth << ");\n"
- << " if (Scratch.getBitWidth() != " << BitWidth << ")\n"
- << " Scratch = Scratch.zext(" << BitWidth << ");\n"
- << " LoadIntFromMemory(Inst, (const uint8_t *)&InstBits[opcode * "
- << NumWords << "], " << NumBytes << ");\n"
- << " APInt &Value = Inst;\n"
- << " APInt &op = Scratch;\n"
- << " switch (opcode) {\n";
- } else {
- o << " const unsigned opcode = MI.getOpcode();\n"
- << " uint64_t Value = InstBits[opcode];\n"
- << " uint64_t op = 0;\n"
- << " (void)op; // suppress warning\n"
- << " switch (opcode) {\n";
- }
+ // Construct all cases statement for each opcode
+ for (Record *R : Insts) {
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
+ std::string InstName =
+ (R->getValueAsString("Namespace") + "::" + R->getName()).str();
+ std::string Case = getInstructionCase(R, Target);
+
+ CaseMap[Case].push_back(std::move(InstName));
+ }
+
+ // Emit initial function code
+ if (UseAPInt) {
+ int NumWords = APInt::getNumWords(BitWidth);
+ int NumBytes = (BitWidth + 7) / 8;
+ o << " const unsigned opcode = MI.getOpcode();\n"
+ << " if (Inst.getBitWidth() != " << BitWidth << ")\n"
+ << " Inst = Inst.zext(" << BitWidth << ");\n"
+ << " if (Scratch.getBitWidth() != " << BitWidth << ")\n"
+ << " Scratch = Scratch.zext(" << BitWidth << ");\n"
+ << " LoadIntFromMemory(Inst, (const uint8_t *)&InstBits[opcode * "
+ << NumWords << "], " << NumBytes << ");\n"
+ << " APInt &Value = Inst;\n"
+ << " APInt &op = Scratch;\n"
+ << " switch (opcode) {\n";
+ } else {
+ o << " const unsigned opcode = MI.getOpcode();\n"
+ << " uint64_t Value = InstBits[opcode];\n"
+ << " uint64_t op = 0;\n"
+ << " (void)op; // suppress warning\n"
+ << " switch (opcode) {\n";
+ }
- // Emit each case statement
- std::map<std::string, std::vector<std::string>>::iterator IE, EE;
- for (IE = CaseMap.begin(), EE = CaseMap.end(); IE != EE; ++IE) {
- const std::string &Case = IE->first;
- std::vector<std::string> &InstList = IE->second;
+ // Emit each case statement
+ std::map<std::string, std::vector<std::string>>::iterator IE, EE;
+ for (IE = CaseMap.begin(), EE = CaseMap.end(); IE != EE; ++IE) {
+ const std::string &Case = IE->first;
+ std::vector<std::string> &InstList = IE->second;
- for (int i = 0, N = InstList.size(); i < N; i++) {
- if (i) o << "\n";
- o << " case " << InstList[i] << ":";
+ for (int i = 0, N = InstList.size(); i < N; i++) {
+ if (i)
+ o << "\n";
+ o << " case " << InstList[i] << ":";
+ }
+ o << " {\n";
+ o << Case;
+ o << " break;\n"
+ << " }\n";
}
- o << " {\n";
- o << Case;
- o << " break;\n"
- << " }\n";
- }
- // Default case: unhandled opcode
- o << " default:\n"
- << " std::string msg;\n"
- << " raw_string_ostream Msg(msg);\n"
- << " Msg << \"Not supported instr: \" << MI;\n"
- << " report_fatal_error(msg.c_str());\n"
- << " }\n";
- if (UseAPInt)
- o << " Inst = Value;\n";
- else
- o << " return Value;\n";
- o << "}\n\n";
+ // Default case: unhandled opcode
+ o << " default:\n"
+ << " std::string msg;\n"
+ << " raw_string_ostream Msg(msg);\n"
+ << " Msg << \"Not supported instr: \" << MI;\n"
+ << " report_fatal_error(Msg.str().c_str());\n"
+ << " }\n";
+ if (UseAPInt)
+ o << " Inst = Value;\n";
+ else
+ o << " return Value;\n";
+ o << "}\n\n";
+ }
const auto &All = SubtargetFeatureInfo::getAll(Records);
std::map<Record *, SubtargetFeatureInfo, LessRecordByID> SubtargetFeatures;
diff --git a/llvm/utils/TableGen/CodeGenRegisters.cpp b/llvm/utils/TableGen/CodeGenRegisters.cpp
index f6fa9e31f0e1..dd72e85e27b9 100644
--- a/llvm/utils/TableGen/CodeGenRegisters.cpp
+++ b/llvm/utils/TableGen/CodeGenRegisters.cpp
@@ -204,12 +204,16 @@ namespace {
class RegUnitIterator {
CodeGenRegister::Vec::const_iterator RegI, RegE;
CodeGenRegister::RegUnitList::iterator UnitI, UnitE;
+ static CodeGenRegister::RegUnitList Sentinel;
public:
RegUnitIterator(const CodeGenRegister::Vec &Regs):
RegI(Regs.begin()), RegE(Regs.end()) {
- if (RegI != RegE) {
+ if (RegI == RegE) {
+ UnitI = Sentinel.end();
+ UnitE = Sentinel.end();
+ } else {
UnitI = (*RegI)->getRegUnits().begin();
UnitE = (*RegI)->getRegUnits().end();
advance();
@@ -240,6 +244,8 @@ protected:
}
};
+CodeGenRegister::RegUnitList RegUnitIterator::Sentinel;
+
} // end anonymous namespace
// Return true of this unit appears in RegUnits.
diff --git a/llvm/utils/TableGen/RegisterInfoEmitter.cpp b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
index ecbb61806d4a..eb4b7b20a5c4 100644
--- a/llvm/utils/TableGen/RegisterInfoEmitter.cpp
+++ b/llvm/utils/TableGen/RegisterInfoEmitter.cpp
@@ -1046,25 +1046,24 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
RegClassStrings.add(Name);
- // Emit the register list now.
- OS << " // " << Name << " Register Class...\n"
- << " const MCPhysReg " << Name
- << "[] = {\n ";
- for (Record *Reg : Order) {
- OS << getQualifiedName(Reg) << ", ";
- }
- OS << "\n };\n\n";
-
- OS << " // " << Name << " Bit set.\n"
- << " const uint8_t " << Name
- << "Bits[] = {\n ";
- BitVectorEmitter BVE;
- for (Record *Reg : Order) {
- BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
- }
- BVE.print(OS);
- OS << "\n };\n\n";
+ // Emit the register list now (unless it would be a zero-length array).
+ if (!Order.empty()) {
+ OS << " // " << Name << " Register Class...\n"
+ << " const MCPhysReg " << Name << "[] = {\n ";
+ for (Record *Reg : Order) {
+ OS << getQualifiedName(Reg) << ", ";
+ }
+ OS << "\n };\n\n";
+ OS << " // " << Name << " Bit set.\n"
+ << " const uint8_t " << Name << "Bits[] = {\n ";
+ BitVectorEmitter BVE;
+ for (Record *Reg : Order) {
+ BVE.add(Target.getRegBank().getReg(Reg)->EnumValue);
+ }
+ BVE.print(OS);
+ OS << "\n };\n\n";
+ }
}
OS << "} // end anonymous namespace\n\n";
@@ -1076,14 +1075,17 @@ RegisterInfoEmitter::runMCDesc(raw_ostream &OS, CodeGenTarget &Target,
<< "MCRegisterClasses[] = {\n";
for (const auto &RC : RegisterClasses) {
+ ArrayRef<Record *> Order = RC.getOrder();
+ std::string RCName = Order.empty() ? "nullptr" : RC.getName();
+ std::string RCBitsName = Order.empty() ? "nullptr" : RC.getName() + "Bits";
+ std::string RCBitsSize = Order.empty() ? "0" : "sizeof(" + RCBitsName + ")";
assert(isInt<8>(RC.CopyCost) && "Copy cost too large.");
uint32_t RegSize = 0;
if (RC.RSI.isSimple())
RegSize = RC.RSI.getSimple().RegSize;
- OS << " { " << RC.getName() << ", " << RC.getName() << "Bits, "
+ OS << " { " << RCName << ", " << RCBitsName << ", "
<< RegClassStrings.get(RC.getName()) << ", " << RC.getOrder().size()
- << ", sizeof(" << RC.getName() << "Bits), "
- << RC.getQualifiedName() + "RegClassID"
+ << ", " << RCBitsSize << ", " << RC.getQualifiedName() + "RegClassID"
<< ", " << RegSize << ", " << RC.CopyCost << ", "
<< (RC.Allocatable ? "true" : "false") << " },\n";
}
diff --git a/llvm/utils/TableGen/VarLenCodeEmitterGen.cpp b/llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
new file mode 100644
index 000000000000..832c9053ffb9
--- /dev/null
+++ b/llvm/utils/TableGen/VarLenCodeEmitterGen.cpp
@@ -0,0 +1,491 @@
+//===- VarLenCodeEmitterGen.cpp - CEG for variable-length insts -----------===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// The CodeEmitterGen component for variable-length instructions.
+//
+// The basic CodeEmitterGen is almost exclusively designed for fixed-
+// length instructions. A good analogy for its encoding scheme is how printf
+// works: The (immutable) format string represents the fixed values in the
+// encoded instruction. Placeholders (e.g. %something), on the other hand,
+// represent the encodings of the instruction operands.
+// ```
+// printf("1101 %src 1001 %dst", <encoded value for operand `src`>,
+// <encoded value for operand `dst`>);
+// ```
+// VarLenCodeEmitterGen in this file provides an alternative encoding scheme
+// that works more like a C++ stream operator:
+// ```
+// OS << 0b1101;
+// if (Cond)
+// OS << OperandEncoding0;
+// OS << 0b1001 << OperandEncoding1;
+// ```
+// You are free to concatenate encoding fragments of arbitrary types (and
+// sizes) at any bit position, which gives more flexibility when defining
+// encodings for variable-length instructions.
+//
+// More concretely, the instruction encoding is represented by a DAG-typed
+// `Inst` field. Here is an example:
+// ```
+// dag Inst = (descend 0b1101, (operand "$src", 4), 0b1001,
+// (operand "$dst", 4));
+// ```
+// It represents the following instruction encoding:
+// ```
+// MSB LSB
+// 1101<encoding for operand src>1001<encoding for operand dst>
+// ```
+// For more details about DAG operators in the above snippet, please
+// refer to \file include/llvm/Target/Target.td.
+//
+// VarLenCodeEmitterGen will convert the above DAG into the same helper
+// function generated by CodeEmitterGen, `MCCodeEmitter::getBinaryCodeForInstr`
+// (except for a few details).
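+//
+// As a purely illustrative sketch (a hypothetical encoding, not taken from
+// any in-tree target), the `slice` operator can reuse a sub-range of an
+// operand's encoding around fixed bits, for example:
+// ```
+// dag Inst = (descend (slice "$imm", 7, 4), 0b01, (slice "$imm", 3, 0));
+// ```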
+//
+//===----------------------------------------------------------------------===//
+
+#include "VarLenCodeEmitterGen.h"
+#include "CodeGenInstruction.h"
+#include "CodeGenTarget.h"
+#include "SubtargetFeatureInfo.h"
+#include "llvm/ADT/ArrayRef.h"
+#include "llvm/ADT/DenseMap.h"
+#include "llvm/Support/raw_ostream.h"
+#include "llvm/TableGen/Error.h"
+#include "llvm/TableGen/Record.h"
+
+using namespace llvm;
+
+namespace {
+
+class VarLenCodeEmitterGen {
+ RecordKeeper &Records;
+
+ class VarLenInst {
+ size_t NumBits;
+
+    // Set if any of the segments is not a fixed value.
+ bool HasDynamicSegment;
+
+ // {Number of bits, Value}
+ SmallVector<std::pair<unsigned, const Init *>, 4> Segments;
+
+ void buildRec(const DagInit *DI);
+
+ public:
+ VarLenInst() : NumBits(0U), HasDynamicSegment(false) {}
+
+ explicit VarLenInst(const DagInit *DI);
+
+ /// Number of bits
+ size_t size() const { return NumBits; }
+
+ using const_iterator = decltype(Segments)::const_iterator;
+
+ const_iterator begin() const { return Segments.begin(); }
+ const_iterator end() const { return Segments.end(); }
+ size_t getNumSegments() const { return Segments.size(); }
+
+ bool isFixedValueOnly() const { return !HasDynamicSegment; }
+ };
+
+ DenseMap<Record *, VarLenInst> VarLenInsts;
+
+  // Emit base values (i.e. fixed bits in the encoded instructions)
+ void emitInstructionBaseValues(
+ raw_ostream &OS,
+ ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+ CodeGenTarget &Target, int HwMode = -1);
+
+ std::string getInstructionCase(Record *R, CodeGenTarget &Target);
+ std::string getInstructionCaseForEncoding(Record *R, Record *EncodingDef,
+ CodeGenTarget &Target);
+
+public:
+ explicit VarLenCodeEmitterGen(RecordKeeper &R) : Records(R) {}
+
+ void run(raw_ostream &OS);
+};
+
+} // end anonymous namespace
+
+VarLenCodeEmitterGen::VarLenInst::VarLenInst(const DagInit *DI) : NumBits(0U) {
+ buildRec(DI);
+ for (const auto &S : Segments)
+ NumBits += S.first;
+}
+
+void VarLenCodeEmitterGen::VarLenInst::buildRec(const DagInit *DI) {
+ std::string Op = DI->getOperator()->getAsString();
+
+ if (Op == "ascend" || Op == "descend") {
+ bool Reverse = Op == "descend";
+ int i = Reverse ? DI->getNumArgs() - 1 : 0;
+ int e = Reverse ? -1 : DI->getNumArgs();
+ int s = Reverse ? -1 : 1;
+ for (; i != e; i += s) {
+ const Init *Arg = DI->getArg(i);
+ if (const auto *BI = dyn_cast<BitsInit>(Arg)) {
+ if (!BI->isComplete())
+ PrintFatalError("Expecting complete bits init in `" + Op + "`");
+ Segments.push_back({BI->getNumBits(), BI});
+ } else if (const auto *BI = dyn_cast<BitInit>(Arg)) {
+ if (!BI->isConcrete())
+ PrintFatalError("Expecting concrete bit init in `" + Op + "`");
+ Segments.push_back({1, BI});
+ } else if (const auto *SubDI = dyn_cast<DagInit>(Arg)) {
+ buildRec(SubDI);
+ } else {
+ PrintFatalError("Unrecognized type of argument in `" + Op +
+ "`: " + Arg->getAsString());
+ }
+ }
+ } else if (Op == "operand") {
+ // (operand <operand name>, <# of bits>)
+ if (DI->getNumArgs() != 2)
+ PrintFatalError("Expecting 2 arguments for `operand`");
+ HasDynamicSegment = true;
+ const Init *OperandName = DI->getArg(0), *NumBits = DI->getArg(1);
+ if (!isa<StringInit>(OperandName) || !isa<IntInit>(NumBits))
+ PrintFatalError("Invalid argument types for `operand`");
+
+ auto NumBitsVal = cast<IntInit>(NumBits)->getValue();
+ if (NumBitsVal <= 0)
+ PrintFatalError("Invalid number of bits for `operand`");
+
+ Segments.push_back({NumBitsVal, OperandName});
+ } else if (Op == "slice") {
+ // (slice <operand name>, <high / low bit>, <low / high bit>)
+ if (DI->getNumArgs() != 3)
+ PrintFatalError("Expecting 3 arguments for `slice`");
+ HasDynamicSegment = true;
+ Init *OperandName = DI->getArg(0), *HiBit = DI->getArg(1),
+ *LoBit = DI->getArg(2);
+ if (!isa<StringInit>(OperandName) || !isa<IntInit>(HiBit) ||
+ !isa<IntInit>(LoBit))
+ PrintFatalError("Invalid argument types for `slice`");
+
+ auto HiBitVal = cast<IntInit>(HiBit)->getValue(),
+ LoBitVal = cast<IntInit>(LoBit)->getValue();
+ if (HiBitVal < 0 || LoBitVal < 0)
+ PrintFatalError("Invalid bit range for `slice`");
+ bool NeedSwap = false;
+ unsigned NumBits = 0U;
+ if (HiBitVal < LoBitVal) {
+ NeedSwap = true;
+ NumBits = static_cast<unsigned>(LoBitVal - HiBitVal + 1);
+ } else {
+ NumBits = static_cast<unsigned>(HiBitVal - LoBitVal + 1);
+ }
+
+ if (NeedSwap) {
+ // Normalization: Hi bit should always be the second argument.
+ Init *const NewArgs[] = {OperandName, LoBit, HiBit};
+ Segments.push_back(
+ {NumBits, DagInit::get(DI->getOperator(), nullptr, NewArgs, {})});
+ } else {
+ Segments.push_back({NumBits, DI});
+ }
+ }
+}
+
+void VarLenCodeEmitterGen::run(raw_ostream &OS) {
+ CodeGenTarget Target(Records);
+ auto Insts = Records.getAllDerivedDefinitions("Instruction");
+
+ auto NumberedInstructions = Target.getInstructionsByEnumValue();
+ const CodeGenHwModes &HWM = Target.getHwModes();
+
+ // The set of HwModes used by instruction encodings.
+ std::set<unsigned> HwModes;
+ for (const CodeGenInstruction *CGI : NumberedInstructions) {
+ Record *R = CGI->TheDef;
+
+ // Create the corresponding VarLenInst instance.
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
+
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ for (auto &KV : EBM) {
+ HwModes.insert(KV.first);
+ Record *EncodingDef = KV.second;
+ auto *DI = EncodingDef->getValueAsDag("Inst");
+ VarLenInsts.insert({EncodingDef, VarLenInst(DI)});
+ }
+ continue;
+ }
+ }
+ auto *DI = R->getValueAsDag("Inst");
+ VarLenInsts.insert({R, VarLenInst(DI)});
+ }
+
+ // Emit function declaration
+ OS << "void " << Target.getName()
+ << "MCCodeEmitter::getBinaryCodeForInstr(const MCInst &MI,\n"
+ << " SmallVectorImpl<MCFixup> &Fixups,\n"
+ << " APInt &Inst,\n"
+ << " APInt &Scratch,\n"
+ << " const MCSubtargetInfo &STI) const {\n";
+
+ // Emit instruction base values
+ if (HwModes.empty()) {
+ emitInstructionBaseValues(OS, NumberedInstructions, Target);
+ } else {
+ for (unsigned HwMode : HwModes)
+ emitInstructionBaseValues(OS, NumberedInstructions, Target, (int)HwMode);
+ }
+
+ if (!HwModes.empty()) {
+ OS << " const unsigned **Index;\n";
+ OS << " const uint64_t *InstBits;\n";
+ OS << " unsigned HwMode = STI.getHwMode();\n";
+ OS << " switch (HwMode) {\n";
+ OS << " default: llvm_unreachable(\"Unknown hardware mode!\"); break;\n";
+ for (unsigned I : HwModes) {
+ OS << " case " << I << ": InstBits = InstBits_" << HWM.getMode(I).Name
+ << "; Index = Index_" << HWM.getMode(I).Name << "; break;\n";
+ }
+ OS << " };\n";
+ }
+
+ // Emit helper function to retrieve base values.
+ OS << " auto getInstBits = [&](unsigned Opcode) -> APInt {\n"
+ << " unsigned NumBits = Index[Opcode][0];\n"
+ << " if (!NumBits)\n"
+ << " return APInt::getZeroWidth();\n"
+ << " unsigned Idx = Index[Opcode][1];\n"
+ << " ArrayRef<uint64_t> Data(&InstBits[Idx], "
+ << "APInt::getNumWords(NumBits));\n"
+ << " return APInt(NumBits, Data);\n"
+ << " };\n";
+
+ // Map to accumulate all the cases.
+ std::map<std::string, std::vector<std::string>> CaseMap;
+
+ // Construct the case statement for each opcode.
+ for (Record *R : Insts) {
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo"))
+ continue;
+ std::string InstName =
+ (R->getValueAsString("Namespace") + "::" + R->getName()).str();
+ std::string Case = getInstructionCase(R, Target);
+
+ CaseMap[Case].push_back(std::move(InstName));
+ }
+
+ // Emit initial function code
+ OS << " const unsigned opcode = MI.getOpcode();\n"
+ << " switch (opcode) {\n";
+
+ // Emit each case statement
+ for (const auto &C : CaseMap) {
+ const std::string &Case = C.first;
+ const auto &InstList = C.second;
+
+ ListSeparator LS("\n");
+ for (const auto &InstName : InstList)
+ OS << LS << " case " << InstName << ":";
+
+ OS << " {\n";
+ OS << Case;
+ OS << " break;\n"
+ << " }\n";
+ }
+ // Default case: unhandled opcode
+ OS << " default:\n"
+ << " std::string msg;\n"
+ << " raw_string_ostream Msg(msg);\n"
+ << " Msg << \"Not supported instr: \" << MI;\n"
+ << " report_fatal_error(Msg.str().c_str());\n"
+ << " }\n";
+ OS << "}\n\n";
+}
+
+static void emitInstBits(raw_ostream &IS, raw_ostream &SS, const APInt &Bits,
+ unsigned &Index) {
+ if (!Bits.getNumWords()) {
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0},";
+ return;
+ }
+
+ IS.indent(4) << "{/*NumBits*/" << Bits.getBitWidth() << ", "
+ << "/*Index*/" << Index << "},";
+
+ SS.indent(4);
+ for (unsigned I = 0; I < Bits.getNumWords(); ++I, ++Index)
+ SS << "UINT64_C(" << utostr(Bits.getRawData()[I]) << "),";
+}
+
+void VarLenCodeEmitterGen::emitInstructionBaseValues(
+ raw_ostream &OS, ArrayRef<const CodeGenInstruction *> NumberedInstructions,
+ CodeGenTarget &Target, int HwMode) {
+ std::string IndexArray, StorageArray;
+ raw_string_ostream IS(IndexArray), SS(StorageArray);
+
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ if (HwMode == -1) {
+ IS << " static const unsigned Index[][2] = {\n";
+ SS << " static const uint64_t InstBits[] = {\n";
+ } else {
+ StringRef Name = HWM.getMode(HwMode).Name;
+ IS << " static const unsigned Index_" << Name << "[][2] = {\n";
+ SS << " static const uint64_t InstBits_" << Name << "[] = {\n";
+ }
+
+ unsigned NumFixedValueWords = 0U;
+ for (const CodeGenInstruction *CGI : NumberedInstructions) {
+ Record *R = CGI->TheDef;
+
+ if (R->getValueAsString("Namespace") == "TargetOpcode" ||
+ R->getValueAsBit("isPseudo")) {
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0},\n";
+ continue;
+ }
+
+ Record *EncodingDef = R;
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ if (EBM.hasMode(HwMode))
+ EncodingDef = EBM.get(HwMode);
+ }
+ }
+
+ auto It = VarLenInsts.find(EncodingDef);
+ if (It == VarLenInsts.end())
+ PrintFatalError(EncodingDef, "VarLenInst not found for this record");
+ const VarLenInst &VLI = It->second;
+
+ unsigned i = 0U, BitWidth = VLI.size();
+
+ // Start by filling in fixed values.
+ APInt Value(BitWidth, 0);
+ auto SI = VLI.begin(), SE = VLI.end();
+ // Scan through all the segments that have fixed-bit values.
+ while (i < BitWidth && SI != SE) {
+ unsigned SegmentNumBits = SI->first;
+ if (const auto *BI = dyn_cast<BitsInit>(SI->second)) {
+ for (unsigned Idx = 0U; Idx != SegmentNumBits; ++Idx) {
+ auto *B = cast<BitInit>(BI->getBit(Idx));
+ Value.setBitVal(i + Idx, B->getValue());
+ }
+ }
+ if (const auto *BI = dyn_cast<BitInit>(SI->second))
+ Value.setBitVal(i, BI->getValue());
+
+ i += SegmentNumBits;
+ ++SI;
+ }
+
+ emitInstBits(IS, SS, Value, NumFixedValueWords);
+ IS << '\t' << "// " << R->getName() << "\n";
+ if (Value.getNumWords())
+ SS << '\t' << "// " << R->getName() << "\n";
+ }
+ IS.indent(4) << "{/*NumBits*/0, /*Index*/0}\n };\n";
+ SS.indent(4) << "UINT64_C(0)\n };\n";
+
+ OS << IS.str() << SS.str();
+}
+
+std::string VarLenCodeEmitterGen::getInstructionCase(Record *R,
+ CodeGenTarget &Target) {
+ std::string Case;
+ if (const RecordVal *RV = R->getValue("EncodingInfos")) {
+ if (auto *DI = dyn_cast_or_null<DefInit>(RV->getValue())) {
+ const CodeGenHwModes &HWM = Target.getHwModes();
+ EncodingInfoByHwMode EBM(DI->getDef(), HWM);
+ Case += " switch (HwMode) {\n";
+ Case += " default: llvm_unreachable(\"Unhandled HwMode\");\n";
+ for (auto &KV : EBM) {
+ Case += " case " + itostr(KV.first) + ": {\n";
+ Case += getInstructionCaseForEncoding(R, KV.second, Target);
+ Case += " break;\n";
+ Case += " }\n";
+ }
+ Case += " }\n";
+ return Case;
+ }
+ }
+ return getInstructionCaseForEncoding(R, R, Target);
+}
+
+std::string VarLenCodeEmitterGen::getInstructionCaseForEncoding(
+ Record *R, Record *EncodingDef, CodeGenTarget &Target) {
+ auto It = VarLenInsts.find(EncodingDef);
+ if (It == VarLenInsts.end())
+ PrintFatalError(EncodingDef, "Parsed encoding record not found");
+ const VarLenInst &VLI = It->second;
+ size_t BitWidth = VLI.size();
+
+ CodeGenInstruction &CGI = Target.getInstruction(R);
+
+ std::string Case;
+ raw_string_ostream SS(Case);
+ // Resize the scratch buffer.
+ if (BitWidth && !VLI.isFixedValueOnly())
+ SS.indent(6) << "Scratch = Scratch.zextOrSelf(" << BitWidth << ");\n";
+ // Populate the base value.
+ SS.indent(6) << "Inst = getInstBits(opcode);\n";
+
+ // Process each segment in VLI.
+ size_t Offset = 0U;
+ for (const auto &Pair : VLI) {
+ unsigned NumBits = Pair.first;
+ const Init *Val = Pair.second;
+ // If it's a StringInit or DagInit, it's a reference to an operand
+ // or part of an operand.
+ if (isa<StringInit>(Val) || isa<DagInit>(Val)) {
+ StringRef OperandName;
+ unsigned LoBit = 0U;
+ if (const auto *SV = dyn_cast<StringInit>(Val)) {
+ OperandName = SV->getValue();
+ } else {
+ // Normalized: (slice <operand name>, <high bit>, <low bit>)
+ const auto *DV = cast<DagInit>(Val);
+ OperandName = cast<StringInit>(DV->getArg(0))->getValue();
+ LoBit = static_cast<unsigned>(cast<IntInit>(DV->getArg(2))->getValue());
+ }
+
+ auto OpIdx = CGI.Operands.ParseOperandName(OperandName);
+ unsigned FlatOpIdx = CGI.Operands.getFlattenedOperandNumber(OpIdx);
+ StringRef EncoderMethodName = "getMachineOpValue";
+ auto &CustomEncoder = CGI.Operands[OpIdx.first].EncoderMethodName;
+ if (!CustomEncoder.empty())
+ EncoderMethodName = CustomEncoder;
+
+ SS.indent(6) << "Scratch.clearAllBits();\n";
+ SS.indent(6) << "// op: " << OperandName.drop_front(1) << "\n";
+ SS.indent(6) << EncoderMethodName << "(MI, MI.getOperand("
+ << utostr(FlatOpIdx) << "), Scratch, Fixups, STI);\n";
+ SS.indent(6) << "Inst.insertBits("
+ << "Scratch.extractBits(" << utostr(NumBits) << ", "
+ << utostr(LoBit) << ")"
+ << ", " << Offset << ");\n";
+ }
+ Offset += NumBits;
+ }
+
+ StringRef PostEmitter = R->getValueAsString("PostEncoderMethod");
+ if (!PostEmitter.empty())
+ SS.indent(6) << "Inst = " << PostEmitter << "(MI, Inst, STI);\n";
+
+ return Case;
+}
+
+namespace llvm {
+
+void emitVarLenCodeEmitter(RecordKeeper &R, raw_ostream &OS) {
+ VarLenCodeEmitterGen(R).run(OS);
+}
+
+} // end namespace llvm
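For reference, the generated getBinaryCodeForInstr() consults two per-opcode tables: Index[Opcode] holds {NumBits, starting word} and InstBits holds the packed fixed-bit words, which the emitted getInstBits lambda turns back into an APInt. Below is a minimal standalone sketch of that scheme; the opcodes, widths, and encoding values are made up for illustration and do not come from any real target.

    // Hypothetical per-opcode tables, in the shape emitted by
    // emitInstructionBaseValues(); all values are invented.
    #include "llvm/ADT/APInt.h"
    #include "llvm/ADT/ArrayRef.h"
    #include <cassert>
    #include <cstdint>
    using namespace llvm;

    static const unsigned Index[][2] = {
        {/*NumBits*/ 0, /*Index*/ 0},  // opcode 0: pseudo, no encoding
        {/*NumBits*/ 48, /*Index*/ 0}, // opcode 1: 48 bits at InstBits[0]
        {/*NumBits*/ 80, /*Index*/ 1}, // opcode 2: 80 bits at InstBits[1..2]
    };
    static const uint64_t InstBits[] = {
        UINT64_C(0x0000123400005678), // opcode 1 (48 bits fit in one word)
        UINT64_C(0xdeadbeefcafef00d), // opcode 2, low word
        UINT64_C(0x000000000000abcd), // opcode 2, high word
    };

    // Mirrors the getInstBits lambda in the generated code.
    static APInt getInstBits(unsigned Opcode) {
      unsigned NumBits = Index[Opcode][0];
      if (!NumBits)
        return APInt::getZeroWidth();
      ArrayRef<uint64_t> Data(&InstBits[Index[Opcode][1]],
                              APInt::getNumWords(NumBits));
      return APInt(NumBits, Data);
    }

    int main() {
      assert(getInstBits(1).getBitWidth() == 48);
      assert(getInstBits(2).getBitWidth() == 80);
      return 0;
    }

Each operand segment is then merged into this base value by the generated Scratch.extractBits()/Inst.insertBits() calls, at the bit offset accumulated while walking the "Inst" dag.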
diff --git a/llvm/utils/TableGen/VarLenCodeEmitterGen.h b/llvm/utils/TableGen/VarLenCodeEmitterGen.h
new file mode 100644
index 000000000000..330b791b7cce
--- /dev/null
+++ b/llvm/utils/TableGen/VarLenCodeEmitterGen.h
@@ -0,0 +1,25 @@
+//===- VarLenCodeEmitterGen.h - CEG for variable-length insts ---*- C++ -*-===//
+//
+// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+// See https://llvm.org/LICENSE.txt for license information.
+// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+//
+//===----------------------------------------------------------------------===//
+//
+// This file declares the CodeEmitterGen component for variable-length
+// instructions. See the .cpp file for more details.
+//
+//===----------------------------------------------------------------------===//
+
+#ifndef LLVM_UTILS_TABLEGEN_VARLENCODEEMITTERGEN_H
+#define LLVM_UTILS_TABLEGEN_VARLENCODEEMITTERGEN_H
+
+namespace llvm {
+
+class RecordKeeper;
+class raw_ostream;
+
+void emitVarLenCodeEmitter(RecordKeeper &R, raw_ostream &OS);
+
+} // end namespace llvm
+#endif
diff --git a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
index 1b802d697d2c..164923f0014b 100644
--- a/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
+++ b/llvm/utils/gn/secondary/libcxx/include/BUILD.gn
@@ -119,6 +119,7 @@ if (current_toolchain == default_toolchain) {
"__algorithm/pop_heap.h",
"__algorithm/prev_permutation.h",
"__algorithm/push_heap.h",
+ "__algorithm/ranges_min_element.h",
"__algorithm/ranges_swap_ranges.h",
"__algorithm/remove.h",
"__algorithm/remove_copy.h",
@@ -273,7 +274,6 @@ if (current_toolchain == default_toolchain) {
"__functional/unary_negate.h",
"__functional/unwrap_ref.h",
"__functional/weak_result_type.h",
- "__functional_base",
"__hash_table",
"__ios/fpos.h",
"__iterator/access.h",
diff --git a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/Utils/BUILD.gn b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/Utils/BUILD.gn
index b3ef08281b43..8ef6c45afcef 100644
--- a/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/Utils/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/lib/Target/AMDGPU/Utils/BUILD.gn
@@ -25,7 +25,6 @@ static_library("Utils") {
sources = [
"AMDGPUAsmUtils.cpp",
"AMDGPUBaseInfo.cpp",
- "AMDGPULDSUtils.cpp",
"AMDGPUMemoryUtils.cpp",
"AMDGPUPALMetadata.cpp",
"AMDKernelCodeTUtils.cpp",
diff --git a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
index 91e7b414c4f9..730bd8c10186 100644
--- a/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
+++ b/llvm/utils/gn/secondary/llvm/utils/TableGen/BUILD.gn
@@ -53,6 +53,7 @@ executable("llvm-tblgen") {
"SubtargetFeatureInfo.cpp",
"TableGen.cpp",
"Types.cpp",
+ "VarLenCodeEmitterGen.cpp",
"WebAssemblyDisassemblerEmitter.cpp",
"X86DisassemblerTables.cpp",
"X86EVEX2VEXTablesEmitter.cpp",
diff --git a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
index 261a107c0e56..ee5a34bcc83b 100644
--- a/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
+++ b/mlir/lib/Dialect/Bufferization/IR/BufferizableOpInterface.cpp
@@ -293,14 +293,13 @@ FailureOr<Value> BufferizationState::getBuffer(
void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
Operation *op,
ValueRange values) {
+ assert(values.size() == op->getNumResults() &&
+ "expected one value per OpResult");
OpBuilder::InsertionGuard g(rewriter);
// Replace all OpResults with the given values.
+ SmallVector<Value> replacements;
for (OpResult opResult : op->getOpResults()) {
- // Skip OpResult if it has no uses.
- if (opResult.getUses().empty())
- continue;
-
Value replacement = values[opResult.getResultNumber()];
if (opResult.getType().isa<TensorType>()) {
// The OpResult is a tensor. Such values are replaced with memrefs during
@@ -315,10 +314,10 @@ void bufferization::replaceOpWithBufferizedValues(RewriterBase &rewriter,
replacement = rewriter.create<bufferization::ToTensorOp>(
replacement.getLoc(), replacement);
}
- opResult.replaceAllUsesWith(replacement);
+ replacements.push_back(replacement);
}
- rewriter.eraseOp(op);
+ rewriter.replaceOp(op, replacements);
}
AlwaysCopyBufferizationState::AlwaysCopyBufferizationState(
diff --git a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
index 8bef525ec887..fbdee8aa9369 100644
--- a/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
+++ b/mlir/lib/Dialect/GPU/Transforms/SerializeToHsaco.cpp
@@ -307,11 +307,6 @@ SerializeToHsacoPass::translateToLLVMIR(llvm::LLVMContext &llvmContext) {
}
}
- // Set amdgpu_hostcall if host calls have been linked, as needed by newer LLVM
- // FIXME: Is there a way to set this during printf() lowering that makes sense
- if (ret->getFunction("__ockl_hostcall_internal"))
- if (!ret->getModuleFlag("amdgpu_hostcall"))
- ret->addModuleFlag(llvm::Module::Override, "amdgpu_hostcall", 1);
return ret;
}
diff --git a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
index 7604d14eb7d1..86ef1210c747 100644
--- a/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
+++ b/mlir/lib/Dialect/Linalg/IR/LinalgInterfaces.cpp
@@ -15,7 +15,7 @@
#include "mlir/IR/AffineExprVisitor.h"
#include "mlir/IR/AffineMap.h"
#include "mlir/IR/TypeUtilities.h"
-#include "llvm/ADT/SmallSet.h"
+#include "llvm/ADT/SmallBitVector.h"
using namespace mlir;
using namespace mlir::linalg;
@@ -484,15 +484,15 @@ SmallVector<int64_t, 4> LinalgOp::computeStaticLoopSizes() {
/// are used within an AffineExpr.
struct HasAffineDimExprVisitor
: public AffineExprVisitor<HasAffineDimExprVisitor, bool> {
- HasAffineDimExprVisitor(llvm::SmallSet<unsigned, 4> &positions)
- : positions(positions) {}
+ HasAffineDimExprVisitor(llvm::SmallBitVector positions)
+ : positions(std::move(positions)) {}
bool visitAffineBinaryOpExpr(AffineBinaryOpExpr binaryOpExpr) {
return visit(binaryOpExpr.getLHS()) || visit(binaryOpExpr.getRHS());
}
bool visitDimExpr(AffineDimExpr dimExpr) {
- return positions.count(dimExpr.getPosition());
+ return positions.test(dimExpr.getPosition());
}
bool visitConstantExpr(AffineConstantExpr constExpr) { return false; }
@@ -500,7 +500,7 @@ struct HasAffineDimExprVisitor
bool visitSymbolExpr(AffineSymbolExpr symbolExpr) { return false; }
private:
- llvm::SmallSet<unsigned, 4> positions;
+ llvm::SmallBitVector positions;
};
LogicalResult
@@ -523,19 +523,17 @@ LinalgOp::reifyResultShapes(OpBuilder &b,
/// From loopsToShapesMap extract the submap that represents the shape of the
/// (resultIdx, dim) needed.
- SmallVector<unsigned, 4> resultPosRange =
- llvm::to_vector<4>(llvm::seq<unsigned>(resultShapesSubMapPos.first,
- resultShapesSubMapPos.second));
- AffineMap loopToResultsShapeMap = loopsToShapesMap.getSubMap(resultPosRange);
+ AffineMap loopToResultsShapeMap = loopsToShapesMap.getSliceMap(
+ resultShapesSubMapPos.first,
+ resultShapesSubMapPos.second - resultShapesSubMapPos.first);
AffineMap resultShapesFromInputShapesMap =
loopToResultsShapeMap.compose(getShapesToLoopsMap());
// Check that the result dim map does not contain the positions corresponding
// to the outputs.
- llvm::SmallSet<unsigned, 4> outputDims;
- llvm::for_each(resultPosRange,
- [&outputDims](unsigned dim) { outputDims.insert(dim); });
- HasAffineDimExprVisitor checkDimExpr(outputDims);
+ llvm::SmallBitVector outputDims(resultShapesFromInputShapesMap.getNumDims());
+ outputDims.set(resultShapesSubMapPos.first, resultShapesSubMapPos.second);
+ HasAffineDimExprVisitor checkDimExpr(std::move(outputDims));
Location loc = getOperation()->getLoc();
auto allResultDimValues =
applyMapToValues(b, loc, resultShapesFromInputShapesMap,
diff --git a/mlir/lib/IR/AffineMap.cpp b/mlir/lib/IR/AffineMap.cpp
index 7fbe7488b97e..5d0ffd6fb5c6 100644
--- a/mlir/lib/IR/AffineMap.cpp
+++ b/mlir/lib/IR/AffineMap.cpp
@@ -299,22 +299,17 @@ unsigned AffineMap::getNumSymbols() const {
assert(map && "uninitialized map storage");
return map->numSymbols;
}
-unsigned AffineMap::getNumResults() const {
- assert(map && "uninitialized map storage");
- return map->results.size();
-}
+unsigned AffineMap::getNumResults() const { return getResults().size(); }
unsigned AffineMap::getNumInputs() const {
assert(map && "uninitialized map storage");
return map->numDims + map->numSymbols;
}
-
ArrayRef<AffineExpr> AffineMap::getResults() const {
assert(map && "uninitialized map storage");
- return map->results;
+ return map->results();
}
AffineExpr AffineMap::getResult(unsigned idx) const {
- assert(map && "uninitialized map storage");
- return map->results[idx];
+ return getResults()[idx];
}
unsigned AffineMap::getDimPosition(unsigned idx) const {
@@ -534,7 +529,7 @@ AffineMap AffineMap::getMajorSubMap(unsigned numResults) const {
return AffineMap();
if (numResults > getNumResults())
return *this;
- return getSubMap(llvm::to_vector<4>(llvm::seq<unsigned>(0, numResults)));
+ return getSliceMap(0, numResults);
}
AffineMap AffineMap::getMinorSubMap(unsigned numResults) const {
@@ -542,8 +537,7 @@ AffineMap AffineMap::getMinorSubMap(unsigned numResults) const {
return AffineMap();
if (numResults > getNumResults())
return *this;
- return getSubMap(llvm::to_vector<4>(
- llvm::seq<unsigned>(getNumResults() - numResults, getNumResults())));
+ return getSliceMap(getNumResults() - numResults, numResults);
}
AffineMap mlir::compressDims(AffineMap map,
diff --git a/mlir/lib/IR/AffineMapDetail.h b/mlir/lib/IR/AffineMapDetail.h
index c0a906b8365c..732c7fd1d3a1 100644
--- a/mlir/lib/IR/AffineMapDetail.h
+++ b/mlir/lib/IR/AffineMapDetail.h
@@ -17,36 +17,47 @@
#include "mlir/IR/AffineMap.h"
#include "mlir/Support/StorageUniquer.h"
#include "llvm/ADT/ArrayRef.h"
+#include "llvm/Support/TrailingObjects.h"
namespace mlir {
namespace detail {
-struct AffineMapStorage : public StorageUniquer::BaseStorage {
+struct AffineMapStorage final
+ : public StorageUniquer::BaseStorage,
+ public llvm::TrailingObjects<AffineMapStorage, AffineExpr> {
/// The hash key used for uniquing.
using KeyTy = std::tuple<unsigned, unsigned, ArrayRef<AffineExpr>>;
unsigned numDims;
unsigned numSymbols;
-
- /// The affine expressions for this (multi-dimensional) map.
- /// TODO: use trailing objects for this.
- ArrayRef<AffineExpr> results;
+ unsigned numResults;
MLIRContext *context;
+ /// The affine expressions for this (multi-dimensional) map.
+ ArrayRef<AffineExpr> results() const {
+ return {getTrailingObjects<AffineExpr>(), numResults};
+ }
+
bool operator==(const KeyTy &key) const {
return std::get<0>(key) == numDims && std::get<1>(key) == numSymbols &&
- std::get<2>(key) == results;
+ std::get<2>(key) == results();
}
// Constructs an AffineMapStorage from a key. The context must be set by the
// caller.
static AffineMapStorage *
construct(StorageUniquer::StorageAllocator &allocator, const KeyTy &key) {
- auto *res = new (allocator.allocate<AffineMapStorage>()) AffineMapStorage();
+ auto results = std::get<2>(key);
+ auto byteSize =
+ AffineMapStorage::totalSizeToAlloc<AffineExpr>(results.size());
+ auto *rawMem = allocator.allocate(byteSize, alignof(AffineMapStorage));
+ auto *res = new (rawMem) AffineMapStorage();
res->numDims = std::get<0>(key);
res->numSymbols = std::get<1>(key);
- res->results = allocator.copyInto(std::get<2>(key));
+ res->numResults = results.size();
+ std::uninitialized_copy(results.begin(), results.end(),
+ res->getTrailingObjects<AffineExpr>());
return res;
}
};
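The AffineMapStorage change above replaces the out-of-line results ArrayRef with llvm::TrailingObjects, so the result expressions are co-allocated immediately after the storage header. A rough sketch of that idiom follows, using a made-up Storage struct holding trailing ints; the names Storage, NumElts, elts, and create are illustrative only and do not exist in MLIR.

    #include "llvm/ADT/ArrayRef.h"
    #include "llvm/Support/TrailingObjects.h"
    #include <memory>
    #include <new>

    struct Storage final : llvm::TrailingObjects<Storage, int> {
      unsigned NumElts;

      // The payload lives directly after the header, like results() above.
      llvm::ArrayRef<int> elts() const {
        return {getTrailingObjects<int>(), NumElts};
      }

      // Header and trailing ints are carved out of a single allocation,
      // mirroring what AffineMapStorage::construct() does with its allocator.
      static Storage *create(llvm::ArrayRef<int> Values) {
        size_t Bytes = totalSizeToAlloc<int>(Values.size());
        void *Mem = ::operator new(Bytes);
        auto *S = new (Mem) Storage();
        S->NumElts = static_cast<unsigned>(Values.size());
        std::uninitialized_copy(Values.begin(), Values.end(),
                                S->getTrailingObjects<int>());
        return S;
      }
    };

The gain is one allocation and one pointer indirection fewer per map; the trade-off is that the payload size is fixed at construction time, which suits uniqued, immutable objects such as AffineMap.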
diff --git a/openmp/runtime/src/kmp_settings.cpp b/openmp/runtime/src/kmp_settings.cpp
index 27ab51dbf7ed..481f605fcd5a 100644
--- a/openmp/runtime/src/kmp_settings.cpp
+++ b/openmp/runtime/src/kmp_settings.cpp
@@ -1245,13 +1245,25 @@ static void __kmp_stg_parse_num_hidden_helper_threads(char const *name,
// task
if (__kmp_hidden_helper_threads_num == 0) {
__kmp_enable_hidden_helper = FALSE;
+ } else {
+ // Since the main thread of the hidden helper team does not participate
+ // in task execution, increment the number of threads by one so that the
+ // requested number of threads can do the actual work.
+ __kmp_hidden_helper_threads_num++;
}
} // __kmp_stg_parse_num_hidden_helper_threads
static void __kmp_stg_print_num_hidden_helper_threads(kmp_str_buf_t *buffer,
char const *name,
void *data) {
- __kmp_stg_print_int(buffer, name, __kmp_hidden_helper_threads_num);
+ if (__kmp_hidden_helper_threads_num == 0) {
+ __kmp_stg_print_int(buffer, name, __kmp_hidden_helper_threads_num);
+ } else {
+ KMP_DEBUG_ASSERT(__kmp_hidden_helper_threads_num > 1);
+ // Exclude the main thread of the hidden helper team and print only the
+ // number of worker threads that do the actual work.
+ __kmp_stg_print_int(buffer, name, __kmp_hidden_helper_threads_num - 1);
+ }
} // __kmp_stg_print_num_hidden_helper_threads
static void __kmp_stg_parse_use_hidden_helper(char const *name,
diff --git a/openmp/runtime/test/tasking/hidden_helper_task/single_helper_thread.c b/openmp/runtime/test/tasking/hidden_helper_task/single_helper_thread.c
new file mode 100644
index 000000000000..a1aeda76e22f
--- /dev/null
+++ b/openmp/runtime/test/tasking/hidden_helper_task/single_helper_thread.c
@@ -0,0 +1,21 @@
+// RUN: %libomp-compile && env LIBOMP_NUM_HIDDEN_HELPER_THREADS=1 %libomp-run
+
+// The test checks that the "divide-by-0" bug is fixed in the runtime.
+// The fix is to increment the number of threads by 1 if it is positive,
+// so that the operation
+// (gtid) % (__kmp_hidden_helper_threads_num - 1)
+// does not crash.
+
+#include <stdio.h>
+#include <omp.h>
+
+int main(){
+#pragma omp target nowait
+ {
+ printf("----- in target region\n");
+ }
+ printf("------ before taskwait\n");
+#pragma omp taskwait
+ printf("passed\n");
+ return 0;
+}
diff --git a/polly/test/ForwardOpTree/changed-kind.ll b/polly/test/ForwardOpTree/changed-kind.ll
index a66ba557d2a8..bf81f51021b5 100644
--- a/polly/test/ForwardOpTree/changed-kind.ll
+++ b/polly/test/ForwardOpTree/changed-kind.ll
@@ -43,12 +43,5 @@ lor.end93:
; CHECK: Statistics {
-; CHECK: Reloads: 1
-; CHECK: }
-
-; CHECK: After statements {
-; CHECK: Stmt_lor_end93
-; CHECK-NEXT: ReadAccess := [Reduction Type: NONE] [Scalar: 1]
-; CHECK-NEXT: { Stmt_lor_end93[] -> MemRef3[] };
-; CHECK-NEXT: new: { Stmt_lor_end93[] -> MemRef_c[0] };
+; CHECK: Reloads: 0
; CHECK: }
diff --git a/test/.gitattributes b/test/.gitattributes
new file mode 100644
index 000000000000..9d17a32c7b26
--- /dev/null
+++ b/test/.gitattributes
@@ -0,0 +1,19 @@
+# CRLF (Windows) line endings take two bytes instead of one, so any tests that
+# rely on or check fixed character -offset, Offset: or FileOffset: locations
+# will fail when run on input files checked out with different line endings.
+
+# Most test input files should use native line endings, to ensure that we run
+# tests against both line ending types.
+* text=auto
+
+# These test input files rely on one-byte Unix (LF) line-endings, as they use
+# fixed -offset, FileOffset:, or Offset: numbers in their tests.
+clang-apply-replacements/ClangRenameClassReplacements.cpp text eol=lf
+clang-apply-replacements/Inputs/basic/basic.h text eol=lf
+clang-apply-replacements/Inputs/format/no.cpp text eol=lf
+clang-apply-replacements/Inputs/format/yes.cpp text eol=lf
+clang-tidy/infrastructure/export-diagnostics.cpp text eol=lf
+
+# These test input files rely on two-byte Windows (CRLF) line endings.
+clang-apply-replacements/Inputs/crlf/crlf.cpp text eol=crlf
+clang-apply-replacements/Inputs/crlf/crlf.cpp.expected text eol=crlf
diff --git a/utils/bazel/README.md b/utils/bazel/README.md
index fe8ead32f7d9..26e13cc4641e 100644
--- a/utils/bazel/README.md
+++ b/utils/bazel/README.md
@@ -73,7 +73,30 @@ platform-specific are selected for in defines. Many are also hardcoded to the
values currently used by all supported configurations. If there is a
configuration you'd like to use that isn't supported, please send a patch.
-# Usage
+# Continuous Testing
+
+A [Buildkite pipeline](https://buildkite.com/llvm-project/upstream-bazel-rbe)
+runs the full Bazel build on every commit to the main branch. Notifications of
+failures are sent to the
+[llvm-bazel-alerts google group](https://groups.google.com/g/llvm-bazel-alerts),
+which anyone is free to join. Currently, the behavior is just to send an email
+on each failure using Buildkite's built-in notification system, so if you
+subscribe, it is highly recommended that you set up email filters or some other
+mechanism to avoid flooding your inbox. More sophisticated notifications (e.g.
+only on status change, or routed by blamelist) are TODO; contributions welcome.
+
+# Pre-merge Testing
+
+A Buildkite pipeline runs the full Bazel build as a pre-merge test using the
+[LLVM pre-merge testing](https://github.com/google/llvm-premerge-checks/). It
+is triggered on all changes to the utils/bazel directory and when the patch
+author is a member of the
+[Bazel Phabricator project](https://reviews.llvm.org/project/members/107/). If
+you use or benefit from the Bazel build, please join the project so that you
+can help keep it green. As a bonus, it runs in under 5 minutes, much faster
+than any of the other pre-merge builds.
+
+# Usage in Downstream Projects
To use in dependent projects using Bazel, you can import LLVM and then use the
provided configuration rule. See example usage in the `examples/` directory.
diff --git a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
index 43462d77d00e..fedcd79faa40 100644
--- a/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/clang/BUILD.bazel
@@ -1344,6 +1344,7 @@ cc_library(
"//llvm:ProfileData",
"//llvm:Support",
"//llvm:Target",
+ "//llvm:WindowsDriver",
"//llvm:config",
],
)
diff --git a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
index e204f7b81a0a..2009a5580822 100644
--- a/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/lld/BUILD.bazel
@@ -140,6 +140,7 @@ cc_library(
"//llvm:Support",
"//llvm:Symbolize",
"//llvm:TransformUtils",
+ "//llvm:WindowsDriver",
"//llvm:WindowsManifest",
],
)
diff --git a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
index 16dd647866c0..73fcd06a0a44 100644
--- a/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
+++ b/utils/bazel/llvm-project-overlay/llvm/BUILD.bazel
@@ -2394,6 +2394,21 @@ cc_library(
)
cc_library(
+ name = "WindowsDriver",
+ srcs = glob([
+ "lib/WindowsDriver/*.cpp",
+ ]),
+ hdrs = glob([
+ "include/llvm/WindowsDriver/*.h",
+ ]),
+ copts = llvm_copts,
+ deps = [
+ ":Option",
+ ":Support",
+ ],
+)
+
+cc_library(
name = "WindowsManifest",
srcs = glob([
"lib/WindowsManifest/*.cpp",