Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[n, k//2] int4pack to [n, k//8] w/o transpose #1186

Open
wants to merge 2 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
22 changes: 2 additions & 20 deletions src/ATen/native/xpu/WeightInt4Pack.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -3,7 +3,7 @@
namespace at::native {

// input is [n][k / 2] (uint8 dtype)
// output is [n / 8][k / (InnerKTiles * 16)][32][innerKTiles / 2] (int32 dtype)
// output is [n][k // 8]
Tensor _convert_weight_to_int4pack_xpu(const Tensor& in, int64_t innerKTiles) {
TORCH_CHECK(in.dim() == 2, __func__, " : expect weight to be 2D tensor.");
TORCH_CHECK(
Expand All @@ -18,25 +18,7 @@ Tensor _convert_weight_to_int4pack_xpu(const Tensor& in, int64_t innerKTiles) {
auto N = weight.size(0);
auto K = weight.size(1) * 2;

// Create fake shapes for cpu. The meta registration in dynamo requires
// operator has the same output shape for each device. So creating a fake
// shape {N / 8, K / (16 * innerKTiles), 32, innerKTiles / 2}
constexpr int64_t kNTileSize = 8;
constexpr int64_t kKTileSize = 16;
auto nTiles = (N + kNTileSize - 1) / kNTileSize;

TORCH_CHECK(N % 16 == 0, __func__, " : expect N to be dividable by 16");
const int64_t kSuperKTileSize = kKTileSize * innerKTiles;
TORCH_CHECK(
K % kSuperKTileSize == 0,
__func__,
" : epxect K to be dividable by ",
kSuperKTileSize);
auto kSuperTiles = (K + kSuperKTileSize - 1) / kSuperKTileSize;

auto weight_packed = at::empty(
{nTiles, kSuperTiles, 32, innerKTiles / 2},
at::TensorOptions().dtype(at::kInt).device(in.device()));
auto weight_packed = at::empty({N, K / 8}, at::TensorOptions().dtype(at::kInt).device(in.device()));

xpu::weight_to_int4pack_kernel(weight_packed, weight, N, K);
return weight_packed;
Expand Down
14 changes: 8 additions & 6 deletions src/ATen/native/xpu/sycl/WeightInt4PackKernel.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -9,19 +9,21 @@ namespace at::native::xpu {
struct WeightToInt4PackKernelFunctor {
// One work-item packs 4 consecutive uint8 source bytes (= 8 int4 values)
// from the [N][K/2] uint8 input row into one int32 word of the [N][K/8]
// int32 output, preserving byte order (the previous layout reversed bytes).
// NOTE(review): assumes K_ is divisible by 8 so every row splits evenly
// into 4-byte groups -- reviewer asked for an explicit assert on this;
// confirm it is checked at the call site before launch.
void operator()(sycl::item<1> item) const {
  auto idx = item.get_linear_id();
  int K_div_2 = K_ / 2; // input row stride, in uint8 elements ([N][K/2])
  int K_div_8 = K_ / 8; // output row stride, in int32 words ([N][K/8])
  int out_y = idx / K_div_8; // output row (n index)
  int out_x = idx % K_div_8; // output word within the row
  int in_y = out_y;          // same row in the source
  int in_x = out_x * 4;      // 4 source bytes feed one output word

  using vec_t = memory::aligned_vector<uint8_t, 4>;
  vec_t input = *reinterpret_cast<vec_t*>(&weight_[in_y * K_div_2 + in_x]);
  vec_t output;
#pragma unroll
  for (int i = 0; i < 4; i++) {
    output[i] = input[i]; // straight copy: keep byte order, no reversal
  }
  *reinterpret_cast<vec_t*>(&weight_packed_[out_y * K_div_8 + out_x]) = output;
}
WeightToInt4PackKernelFunctor(
uint32_t* weight_packed,
Expand Down
Loading