# pyproject.toml for llm-on-ray (forked from intel/llm-on-ray)
[build-system]
requires = ["setuptools", "setuptools-scm"]
build-backend = "setuptools.build_meta"
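
# PEP 621 project metadata, consumed by the setuptools backend declared above.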
[project]
name = "llm-on-ray"
version = "0.0.1"
description = "LLM on Ray Workflow"
readme = "README.md"
requires-python = ">=3.9"
keywords = ["llm", "ray"]
license = {text = "Apache-2.0"}
classifiers = [
    "Development Status :: 2 - Pre-Alpha",
    "Programming Language :: Python :: 3",
    "Environment :: GPU :: Intel Data Center GPU",
    "Environment :: HPU :: Gaudi",
    "Framework :: Ray"
]
dependencies = [
    "accelerate",
    "datasets>=2.14.6",
    "numpy<2.0.0",
    "ray>=2.10",
    "ray[serve,tune]>=2.10",
    "typing>=3.7.4.3",
    "tabulate",
    "gymnasium",
    "dm-tree",
    "tensorboard",
    "scikit-image",
    "einops",
    "peft>=0.4.0",
    "py-cpuinfo",
    "pydantic-yaml",
    "async_timeout",
    "typer",
    "matplotlib"
]
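
# Hardware-specific extras: the cpu and gpu groups pin conflicting torch builds,
# so install exactly one of them, e.g. from a local checkout:
#   pip install -e ".[cpu]"
# Note (assumption): the +xpu and a0 builds pinned in the gpu group are not on
# PyPI and are typically installed from Intel's extra package index.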
[project.optional-dependencies]
cpu = [
    "transformers>=4.38.0, <=4.38.1",
    "intel_extension_for_pytorch==2.3.100",
    "torch==2.2.0",
    "oneccl_bind_pt==2.2.0"
]
gpu = [
    "transformers>=4.35.0",
    "torch==2.1.0a0",
    "torchvision==0.16.0a0",
    "intel_extension_for_pytorch==2.1.10+xpu",
    "oneccl_bind_pt==2.1.100+xpu",
    "dpctl==0.15.0",
    "deepspeed==0.13.1"
]
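# Note: the pin below (deepspeed<0.11.2) cannot resolve together with the gpu
# group's deepspeed==0.13.1, so the deepspeed and gpu extras are mutually exclusive.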
deepspeed = [
    "packaging>=20.0",
    "psutil",
    "tqdm",
    # deepspeed >= 0.10.2 is required for BLOOM support
    "deepspeed>=0.10.2, <0.11.2"
]
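# ipex-llm is pinned to a dated pre-release build; install via `pip install -e ".[ipex-llm]"`.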
ipex-llm = [
    "ipex-llm[all]==2.1.0b20240408"
]

[tool.setuptools]
# Together with MANIFEST.in, the settings below work for both bare-metal and container installs.
package-dir = {"llm_on_ray" = "llm_on_ray"}
include-package-data = true

[project.urls]
Repository = "https://github.com/intel/llm-on-ray.git"
Issues = "https://github.com/intel/llm-on-ray/issues"
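
# Console entry points: installing the package places these commands on PATH,
# e.g. `llm_on_ray-serve` dispatches to llm_on_ray.inference.serve:main.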
[project.scripts]
llm_on_ray-finetune = "llm_on_ray.finetune.finetune:main"
llm_on_ray-serve = "llm_on_ray.inference.serve:main"
llm_on_ray-pretrain = "llm_on_ray.pretrain.pretrain:main"
llm_on_ray-megatron_deepspeed_pretrain = "llm_on_ray.pretrain.megatron_deepspeed_pretrain:main"
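
# Code style: Black with a 100-character line limit (run e.g. `black .`).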
[tool.black]
line-length = 100