Skip to content

Commit 26c6281

Browse files
Commit message: Formatting
1 parent 4a70b2e commit 26c6281

File tree

1 file changed

+3
-2
lines changed

1 file changed

+3
-2
lines changed

olmocr/pipeline.py

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1015,7 +1015,6 @@ async def main():
10151015
default="allenai/olmOCR-7B-0725-FP8",
10161016
)
10171017

1018-
10191018
# More detailed config options, usually you shouldn't have to change these
10201019
parser.add_argument("--workspace_profile", help="S3 configuration profile for accessing the workspace", default=None)
10211020
parser.add_argument("--pdf_profile", help="S3 configuration profile for accessing the raw pdf documents", default=None)
@@ -1032,7 +1031,9 @@ async def main():
10321031
parser.add_argument("--guided_decoding", action="store_true", help="Enable guided decoding for model YAML type outputs")
10331032

10341033
vllm_group = parser.add_argument_group("VLLM Forwarded arguments")
1035-
vllm_group.add_argument("--gpu-memory-utilization", type=float, help="Fraction of VRAM vLLM may pre-allocate for KV-cache " "(passed through to vllm serve).")
1034+
vllm_group.add_argument(
1035+
"--gpu-memory-utilization", type=float, help="Fraction of VRAM vLLM may pre-allocate for KV-cache " "(passed through to vllm serve)."
1036+
)
10361037
vllm_group.add_argument("--max_model_len", type=int, default=16384, help="Upper bound (tokens) vLLM will allocate KV-cache for, lower if VLLM won't start")
10371038
vllm_group.add_argument("--tensor-parallel-size", "-tp", type=int, default=1, help="Tensor parallel size for vLLM")
10381039
vllm_group.add_argument("--data-parallel-size", "-dp", type=int, default=1, help="Data parallel size for vLLM")

0 commit comments

Comments (0)