//===-- spirvamdgpuintrin.h - SPIRV AMDGPU intrinsic functions -----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef __SPIRVAMDGPUINTRIN_H
#define __SPIRVAMDGPUINTRIN_H

#if !defined(__SPIRV64__) || !defined(__AMDGPU__)
#error "This file is intended for the spirv64-amd-amdhsa target"
#endif

#ifndef __GPUINTRIN_H
#error "Never use <spirvamdgpuintrin.h> directly; include <gpuintrin.h> instead"
#endif

_Pragma("omp begin declare target device_type(nohost)");
_Pragma("omp begin declare variant match(device = {arch(amdgcn)})");

// Type aliases to the address spaces used by the SPIRV64 AMDGPU backend.
#define __gpu_private __attribute__((address_space(0)))
#define __gpu_constant __attribute__((address_space(1)))
#define __gpu_local __attribute__((address_space(3)))
#define __gpu_global __attribute__((address_space(1)))
#define __gpu_generic __attribute__((address_space(4)))
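
// For illustration only (not part of this header's interface): the qualifiers
// above attach to ordinary declarations, e.g. a hypothetical workgroup-shared
// scratch buffer placed in 'local' memory:
//
//   static __gpu_local uint32_t __scratch[64];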

// An attribute to declare a function as a kernel is not available on SPIR-V,
// so __gpu_kernel expands to nothing.
#define __gpu_kernel
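
// A usage sketch (hypothetical kernel, assuming the portable <gpuintrin.h>
// entry points): kernels keep the same spelling on every target, the
// attribute simply vanishes here.
//
//   __gpu_kernel void example(uint32_t *out) {
//     out[__gpu_thread_id_x()] = __gpu_lane_id();
//   }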

// Returns the number of workgroups in the 'x' dimension of the grid.
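// The AMDGCN grid size builtins report the grid in workitems, so the
// workgroup count is the grid size divided by the workgroup size.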
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_x(void) {
  return __builtin_amdgcn_grid_size_x() / __builtin_amdgcn_workgroup_size_x();
}

// Returns the number of workgroups in the 'y' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_y(void) {
  return __builtin_amdgcn_grid_size_y() / __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workgroups in the 'z' dimension of the grid.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_blocks_z(void) {
  return __builtin_amdgcn_grid_size_z() / __builtin_amdgcn_workgroup_size_z();
}

// Returns the 'x' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_x(void) {
  return __builtin_amdgcn_workgroup_id_x();
}

// Returns the 'y' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_y(void) {
  return __builtin_amdgcn_workgroup_id_y();
}

// Returns the 'z' dimension of the current AMD workgroup's id.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_block_id_z(void) {
  return __builtin_amdgcn_workgroup_id_z();
}

// Returns the number of workitems in the 'x' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_x(void) {
  return __builtin_amdgcn_workgroup_size_x();
}

// Returns the number of workitems in the 'y' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_y(void) {
  return __builtin_amdgcn_workgroup_size_y();
}

// Returns the number of workitems in the 'z' dimension.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_threads_z(void) {
  return __builtin_amdgcn_workgroup_size_z();
}

// Returns the 'x' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_x(void) {
  return __builtin_amdgcn_workitem_id_x();
}

// Returns the 'y' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_y(void) {
  return __builtin_amdgcn_workitem_id_y();
}

// Returns the 'z' dimension id of the workitem in the current AMD workgroup.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_thread_id_z(void) {
  return __builtin_amdgcn_workitem_id_z();
}
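
// A common composition of the identifiers above (illustrative sketch, assumes
// a one-dimensional launch) is a flat global workitem id:
//
//   uint32_t flat_id =
//       __gpu_block_id_x() * __gpu_num_threads_x() + __gpu_thread_id_x();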

// Returns the size of an AMD wavefront, either 32 or 64 depending on hardware
// and compilation options.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_num_lanes(void) {
  return __builtin_amdgcn_wavefrontsize();
}

// Returns the id of the thread within the AMD wavefront executing together.
_DEFAULT_FN_ATTRS static __inline__ uint32_t __gpu_lane_id(void) {
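  // mbcnt_lo/mbcnt_hi count the set bits of the given mask below the calling
  // lane, so with a full mask and a zero base this yields the lane id.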
  return __builtin_amdgcn_mbcnt_hi(~0u, __builtin_amdgcn_mbcnt_lo(~0u, 0u));
}

// Returns the bit-mask of active threads in the current wavefront.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_lane_mask(void) {
  return __builtin_amdgcn_read_exec();
}

// Copies the value from the first active thread in the wavefront to the rest.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_read_first_lane_u32(uint64_t __lane_mask, uint32_t __x) {
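  // readfirstlane always broadcasts from the lowest active lane; the lane
  // mask parameter exists for interface uniformity and is unused here.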
  return __builtin_amdgcn_readfirstlane(__x);
}

// Returns a bitmask of threads in the current wavefront for which \p __x is
// true.
_DEFAULT_FN_ATTRS static __inline__ uint64_t __gpu_ballot(uint64_t __lane_mask,
                                                          bool __x) {
  // Masking with __lane_mask gives the NVPTX semantics when __lane_mask is a
  // subset of the currently active threads.
  return __lane_mask & __builtin_amdgcn_ballot_w64(__x);
}

// Waits for all the threads in the block to converge and issues a fence.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_threads(void) {
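  // s_barrier waits for every workitem in the workgroup to arrive; the
  // workgroup-scope fence then orders their memory operations.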
  __builtin_amdgcn_s_barrier();
  __builtin_amdgcn_fence(__ATOMIC_SEQ_CST, "workgroup");
}

// Waits for all threads in the wavefront to converge; this is a no-op on
// AMDGPU.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_sync_lane(uint64_t __lane_mask) {
  __builtin_amdgcn_wave_barrier();
}

// Shuffles the lanes inside the wavefront according to the given index.
_DEFAULT_FN_ATTRS static __inline__ uint32_t
__gpu_shuffle_idx_u32(uint64_t __lane_mask, uint32_t __idx, uint32_t __x,
                      uint32_t __width) {
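  // Select the source lane within the __width-wide segment containing the
  // caller; the <gpuintrin.h> interface expects __width to be a power of two.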
  uint32_t __lane = __idx + (__gpu_lane_id() & ~(__width - 1));
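  // ds_bpermute addresses lanes in bytes, hence the scaling by four.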
  return __builtin_amdgcn_ds_bpermute(__lane << 2, __x);
}

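// The match operations below lower to the portable fallback implementations
// that <gpuintrin.h> builds from ballot and first-lane broadcasts.
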
// Returns a bitmask marking all lanes that have the same value of __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_any_u32(uint64_t __lane_mask, uint32_t __x) {
  return __gpu_match_any_u32_impl(__lane_mask, __x);
}

// Returns a bitmask marking all lanes that have the same value of __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_any_u64(uint64_t __lane_mask, uint64_t __x) {
  return __gpu_match_any_u64_impl(__lane_mask, __x);
}

// Returns the current lane mask if every lane contains __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_all_u32(uint64_t __lane_mask, uint32_t __x) {
  return __gpu_match_all_u32_impl(__lane_mask, __x);
}

// Returns the current lane mask if every lane contains __x.
_DEFAULT_FN_ATTRS static __inline__ uint64_t
__gpu_match_all_u64(uint64_t __lane_mask, uint64_t __x) {
  return __gpu_match_all_u64_impl(__lane_mask, __x);
}

// Returns true if the flat pointer points to AMDGPU 'shared' memory.
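// The casts below move the flat pointer through the OpenCL generic address
// space so it has the representation the AMDGCN builtins expect.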
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_local(void *ptr) {
  return __builtin_amdgcn_is_shared((void [[clang::address_space(0)]] *)((
      void [[clang::opencl_generic]] *)ptr));
}

// Returns true if the flat pointer points to AMDGPU 'private' memory.
_DEFAULT_FN_ATTRS static __inline__ bool __gpu_is_ptr_private(void *ptr) {
  return __builtin_amdgcn_is_private((void [[clang::address_space(0)]] *)((
      void [[clang::opencl_generic]] *)ptr));
}

// Terminates execution of the associated wavefront.
_DEFAULT_FN_ATTRS [[noreturn]] static __inline__ void __gpu_exit(void) {
  __builtin_amdgcn_endpgm();
}

// Suspends the thread briefly to assist the scheduler during busy loops.
_DEFAULT_FN_ATTRS static __inline__ void __gpu_thread_suspend(void) {
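  // s_sleep pauses the wavefront for roughly 64 * N cycles; a small argument
  // keeps the pause short while still freeing issue slots for other waves.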
  __builtin_amdgcn_s_sleep(2);
}

_Pragma("omp end declare variant");
_Pragma("omp end declare target");

#endif // __SPIRVAMDGPUINTRIN_H