Skip to content

Commit 8430abe

Browse files
authored
added hf token arg to script (#30)
1 parent d9f3ef3 commit 8430abe

File tree

1 file changed

+2
-1
lines changed

1 file changed

+2
-1
lines changed

generate/generate.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@
3333
parser.add_argument('--do_sample', action='store_true', help='Enable sampling (default: False)')
3434
parser.add_argument('--batch_size', type=int, default=16, help='Batch size for generation (default: 8)')
3535
parser.add_argument('--prompted', action='store_true', help='Use prompted generation. See StarCoder paper (default: False)')
36+
parser.add_argument('--hf_token', type=str, help='HuggingFace API token for loading models')
3637
args = parser.parse_args()
3738

3839
""" Load prompts """
@@ -97,7 +98,7 @@
9798
prompts_repeated = [p for p in prompts for _ in range(args.num_samples_per_prompt)]
9899

99100
""" Initialize HuggingFace pipeline for generation """
100-
generator = pipeline(model=args.model, torch_dtype=inference_config.get_dtype(), device=0)
101+
generator = pipeline(model=args.model, torch_dtype=inference_config.get_dtype(), device=0, token=args.hf_token)
101102
inference_config.init_padding(generator.tokenizer)
102103

103104
""" Create a prompt data set to pass to generate method """

0 commit comments

Comments (0)