
Commit 6338167

♻️ Don't convert to camelCase when generating inference types
1 parent bd696fa commit 6338167
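The codegen script now disables quicktype's "nice-property-names" renderer option, so the generated TypeScript interfaces keep the original snake_case JSON property names instead of renaming them to camelCase. A rough sketch of the difference (illustrative interface names, not taken from the generated files):

// With "nice-property-names": true, quicktype renames wire keys to camelCase:
interface ExampleParametersCamel {
	topK?: number; // serialized key was "top_k"
}

// With "nice-property-names": false, the wire key is kept verbatim:
interface ExampleParametersSnake {
	top_k?: number; // matches the JSON payload as-is
}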

24 files changed: +131 -131 lines changed

packages/tasks/scripts/inference-codegen.ts

Lines changed: 1 addition & 1 deletion
@@ -57,7 +57,7 @@ async function generateTypescript(inputData: InputData): Promise<SerializedRende
	indentation: "\t",
	rendererOptions: {
		"just-types": true,
-		"nice-property-names": true,
+		"nice-property-names": false,
		"prefer-unions": true,
		"prefer-const-values": true,
		"prefer-unknown": true,

packages/tasks/src/tasks/audio-classification/inference.ts

Lines changed: 2 additions & 2 deletions
@@ -23,11 +23,11 @@ export interface AudioClassificationInput {
 * Additional inference parameters for Audio Classification
 */
export interface AudioClassificationParameters {
-	functionToApply?: ClassificationOutputTransform;
+	function_to_apply?: ClassificationOutputTransform;
	/**
	 * When specified, limits the output to the top K most probable classes.
	 */
-	topK?: number;
+	top_k?: number;
	[property: string]: unknown;
}
/**
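With the wire-format names, a parameters object can be serialized straight into a request payload without any key remapping. A minimal usage sketch (the import path and the "softmax" value are assumptions for illustration, not part of this commit):

import type { AudioClassificationParameters } from "./inference";

const parameters: AudioClassificationParameters = {
	function_to_apply: "softmax", // assumes "softmax" is a ClassificationOutputTransform member
	top_k: 3,
};

// The object maps 1:1 onto the JSON the model server expects.
const body = JSON.stringify({ parameters });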

packages/tasks/src/tasks/automatic-speech-recognition/inference.ts

Lines changed: 16 additions & 16 deletions
@@ -32,7 +32,7 @@ export interface AutomaticSpeechRecognitionParameters {
	/**
	 * Whether to output corresponding timestamps with the generated text
	 */
-	returnTimestamps?: boolean;
+	return_timestamps?: boolean;
	[property: string]: unknown;
}

@@ -45,18 +45,18 @@ export interface GenerationParameters {
	/**
	 * Whether to use sampling instead of greedy decoding when generating new tokens.
	 */
-	doSample?: boolean;
+	do_sample?: boolean;
	/**
	 * Controls the stopping condition for beam-based methods.
	 */
-	earlyStopping?: EarlyStoppingUnion;
+	early_stopping?: EarlyStoppingUnion;
	/**
	 * If set to float strictly between 0 and 1, only tokens with a conditional probability
	 * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
	 * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
	 * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
	 */
-	epsilonCutoff?: number;
+	epsilon_cutoff?: number;
	/**
	 * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
	 * float strictly between 0 and 1, a token is only considered if it is greater than either

@@ -66,62 +66,62 @@ export interface GenerationParameters {
	 * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
	 * for more details.
	 */
-	etaCutoff?: number;
+	eta_cutoff?: number;
	/**
	 * The maximum length (in tokens) of the generated text, including the input.
	 */
-	maxLength?: number;
+	max_length?: number;
	/**
	 * The maximum number of tokens to generate. Takes precedence over maxLength.
	 */
-	maxNewTokens?: number;
+	max_new_tokens?: number;
	/**
	 * The minimum length (in tokens) of the generated text, including the input.
	 */
-	minLength?: number;
+	min_length?: number;
	/**
	 * The minimum number of tokens to generate. Takes precedence over maxLength.
	 */
-	minNewTokens?: number;
+	min_new_tokens?: number;
	/**
	 * Number of groups to divide num_beams into in order to ensure diversity among different
	 * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
	 */
-	numBeamGroups?: number;
+	num_beam_groups?: number;
	/**
	 * Number of beams to use for beam search.
	 */
-	numBeams?: number;
+	num_beams?: number;
	/**
	 * The value balances the model confidence and the degeneration penalty in contrastive
	 * search decoding.
	 */
-	penaltyAlpha?: number;
+	penalty_alpha?: number;
	/**
	 * The value used to modulate the next token probabilities.
	 */
	temperature?: number;
	/**
	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
	 */
-	topK?: number;
+	top_k?: number;
	/**
	 * If set to float < 1, only the smallest set of most probable tokens with probabilities
	 * that add up to top_p or higher are kept for generation.
	 */
-	topP?: number;
+	top_p?: number;
	/**
	 * Local typicality measures how similar the conditional probability of predicting a target
	 * token next is to the expected conditional probability of predicting a random token next,
	 * given the partial text already generated. If set to float < 1, the smallest set of the
	 * most locally typical tokens with probabilities that add up to typical_p or higher are
	 * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
	 */
-	typicalP?: number;
+	typical_p?: number;
	/**
	 * Whether the model should use the past last key/values attentions to speed up decoding
	 */
-	useCache?: boolean;
+	use_cache?: boolean;
	[property: string]: unknown;
}
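These keys now mirror the generation arguments used server-side (e.g. transformers-style generate kwargs), so a GenerationParameters object can be forwarded without renaming. A small sketch (import path hypothetical):

import type { GenerationParameters } from "./inference";

const generation: GenerationParameters = {
	do_sample: true,
	temperature: 0.7,
	top_k: 50,
	top_p: 0.95,
	max_new_tokens: 128,
};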

packages/tasks/src/tasks/depth-estimation/inference.ts

Lines changed: 1 addition & 1 deletion
@@ -30,6 +30,6 @@ export interface DepthEstimationParameters {
	/**
	 * When specified, limits the output to the top K most probable classes.
	 */
-	topK?: number;
+	top_k?: number;
	[property: string]: unknown;
}

packages/tasks/src/tasks/document-question-answering/inference.ts

Lines changed: 7 additions & 7 deletions
@@ -42,11 +42,11 @@ export interface DocumentQuestionAnsweringParameters {
	 * be split in several chunks with some overlap. This argument controls the size of that
	 * overlap.
	 */
-	docStride?: number;
+	doc_stride?: number;
	/**
	 * Whether to accept impossible as an answer
	 */
-	handleImpossibleAnswer?: boolean;
+	handle_impossible_answer?: boolean;
	/**
	 * Language to use while running OCR. Defaults to english.
	 */

@@ -55,27 +55,27 @@ export interface DocumentQuestionAnsweringParameters {
	 * The maximum length of predicted answers (e.g., only answers with a shorter length are
	 * considered).
	 */
-	maxAnswerLen?: number;
+	max_answer_len?: number;
	/**
	 * The maximum length of the question after tokenization. It will be truncated if needed.
	 */
-	maxQuestionLen?: number;
+	max_question_len?: number;
	/**
	 * The maximum length of the total sentence (context + question) in tokens of each chunk
	 * passed to the model. The context will be split in several chunks (using doc_stride as
	 * overlap) if needed.
	 */
-	maxSeqLen?: number;
+	max_seq_len?: number;
	/**
	 * The number of answers to return (will be chosen by order of likelihood). Can return less
	 * than top_k answers if there are not enough options available within the context.
	 */
-	topK?: number;
+	top_k?: number;
	/**
	 * A list of words and bounding boxes (normalized 0->1000). If provided, the inference will
	 * skip the OCR step and use the provided bounding boxes instead.
	 */
-	wordBoxes?: WordBox[];
+	word_boxes?: WordBox[];
	[property: string]: unknown;
}
export type WordBox = number[] | string;

packages/tasks/src/tasks/fill-mask/inference.ts

Lines changed: 5 additions & 5 deletions
@@ -33,18 +33,14 @@ export interface FillMaskParameters {
	/**
	 * When passed, overrides the number of predictions to return.
	 */
-	topK?: number;
+	top_k?: number;
	[property: string]: unknown;
}
export type FillMaskOutput = FillMaskOutputElement[];
/**
 * Outputs of inference for the Fill Mask task
 */
export interface FillMaskOutputElement {
-	/**
-	 * The predicted token (to replace the masked one).
-	 */
-	fillMaskOutputTokenStr?: string;
	/**
	 * The corresponding probability
	 */

@@ -58,5 +54,9 @@ export interface FillMaskOutputElement {
	 */
	token: number;
	tokenStr: unknown;
+	/**
+	 * The predicted token (to replace the masked one).
+	 */
+	token_str?: string;
	[property: string]: unknown;
}
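On the output side, the predicted token string is now exposed as token_str. A consumption sketch using only the fields visible in this diff (import path hypothetical):

import type { FillMaskOutput } from "./inference";

function logPredictions(outputs: FillMaskOutput): void {
	for (const prediction of outputs) {
		// token is the predicted token id, token_str its string form
		console.log(prediction.token, prediction.token_str ?? "<unknown>");
	}
}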

packages/tasks/src/tasks/image-classification/inference.ts

Lines changed: 2 additions & 2 deletions
@@ -23,11 +23,11 @@ export interface ImageClassificationInput {
 * Additional inference parameters for Image Classification
 */
export interface ImageClassificationParameters {
-	functionToApply?: ClassificationOutputTransform;
+	function_to_apply?: ClassificationOutputTransform;
	/**
	 * When specified, limits the output to the top K most probable classes.
	 */
-	topK?: number;
+	top_k?: number;
	[property: string]: unknown;
}
/**

packages/tasks/src/tasks/image-segmentation/inference.ts

Lines changed: 2 additions & 2 deletions
@@ -26,11 +26,11 @@ export interface ImageSegmentationParameters {
	/**
	 * Threshold to use when turning the predicted masks into binary values.
	 */
-	maskThreshold?: number;
+	mask_threshold?: number;
	/**
	 * Mask overlap threshold to eliminate small, disconnected segments.
	 */
-	overlapMaskAreaThreshold?: number;
+	overlap_mask_area_threshold?: number;
	/**
	 * Segmentation task to be performed, depending on model capabilities.
	 */

packages/tasks/src/tasks/image-to-image/inference.ts

Lines changed: 4 additions & 4 deletions
@@ -29,20 +29,20 @@ export interface ImageToImageParameters {
	 * For diffusion models. A higher guidance scale value encourages the model to generate
	 * images closely linked to the text prompt at the expense of lower image quality.
	 */
-	guidanceScale?: number;
+	guidance_scale?: number;
	/**
	 * One or several prompt to guide what NOT to include in image generation.
	 */
-	negativePrompt?: string[];
+	negative_prompt?: string[];
	/**
	 * For diffusion models. The number of denoising steps. More denoising steps usually lead to
	 * a higher quality image at the expense of slower inference.
	 */
-	numInferenceSteps?: number;
+	num_inference_steps?: number;
	/**
	 * The size in pixel of the output image
	 */
-	targetSize?: TargetSize;
+	target_size?: TargetSize;
	[property: string]: unknown;
}
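A parameters sketch for the diffusion-style options above (import path hypothetical; the width/height shape of TargetSize is an assumption, since it is not shown in this diff):

import type { ImageToImageParameters } from "./inference";

const parameters: ImageToImageParameters = {
	guidance_scale: 7.5,
	negative_prompt: ["blurry", "low quality"],
	num_inference_steps: 30,
	target_size: { width: 512, height: 512 }, // assumed TargetSize shape
};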

packages/tasks/src/tasks/image-to-text/inference.ts

Lines changed: 17 additions & 17 deletions
@@ -32,7 +32,7 @@ export interface ImageToTextParameters {
	/**
	 * The amount of maximum tokens to generate.
	 */
-	maxNewTokens?: number;
+	max_new_tokens?: number;
	[property: string]: unknown;
}

@@ -45,18 +45,18 @@ export interface GenerationParameters {
	/**
	 * Whether to use sampling instead of greedy decoding when generating new tokens.
	 */
-	doSample?: boolean;
+	do_sample?: boolean;
	/**
	 * Controls the stopping condition for beam-based methods.
	 */
-	earlyStopping?: EarlyStoppingUnion;
+	early_stopping?: EarlyStoppingUnion;
	/**
	 * If set to float strictly between 0 and 1, only tokens with a conditional probability
	 * greater than epsilon_cutoff will be sampled. In the paper, suggested values range from
	 * 3e-4 to 9e-4, depending on the size of the model. See [Truncation Sampling as Language
	 * Model Desmoothing](https://hf.co/papers/2210.15191) for more details.
	 */
-	epsilonCutoff?: number;
+	epsilon_cutoff?: number;
	/**
	 * Eta sampling is a hybrid of locally typical sampling and epsilon sampling. If set to
	 * float strictly between 0 and 1, a token is only considered if it is greater than either

@@ -66,62 +66,62 @@ export interface GenerationParameters {
	 * See [Truncation Sampling as Language Model Desmoothing](https://hf.co/papers/2210.15191)
	 * for more details.
	 */
-	etaCutoff?: number;
+	eta_cutoff?: number;
	/**
	 * The maximum length (in tokens) of the generated text, including the input.
	 */
-	maxLength?: number;
+	max_length?: number;
	/**
	 * The maximum number of tokens to generate. Takes precedence over maxLength.
	 */
-	maxNewTokens?: number;
+	max_new_tokens?: number;
	/**
	 * The minimum length (in tokens) of the generated text, including the input.
	 */
-	minLength?: number;
+	min_length?: number;
	/**
	 * The minimum number of tokens to generate. Takes precedence over maxLength.
	 */
-	minNewTokens?: number;
+	min_new_tokens?: number;
	/**
	 * Number of groups to divide num_beams into in order to ensure diversity among different
	 * groups of beams. See [this paper](https://hf.co/papers/1610.02424) for more details.
	 */
-	numBeamGroups?: number;
+	num_beam_groups?: number;
	/**
	 * Number of beams to use for beam search.
	 */
-	numBeams?: number;
+	num_beams?: number;
	/**
	 * The value balances the model confidence and the degeneration penalty in contrastive
	 * search decoding.
	 */
-	penaltyAlpha?: number;
+	penalty_alpha?: number;
	/**
	 * The value used to modulate the next token probabilities.
	 */
	temperature?: number;
	/**
	 * The number of highest probability vocabulary tokens to keep for top-k-filtering.
	 */
-	topK?: number;
+	top_k?: number;
	/**
	 * If set to float < 1, only the smallest set of most probable tokens with probabilities
	 * that add up to top_p or higher are kept for generation.
	 */
-	topP?: number;
+	top_p?: number;
	/**
	 * Local typicality measures how similar the conditional probability of predicting a target
	 * token next is to the expected conditional probability of predicting a random token next,
	 * given the partial text already generated. If set to float < 1, the smallest set of the
	 * most locally typical tokens with probabilities that add up to typical_p or higher are
	 * kept for generation. See [this paper](https://hf.co/papers/2202.00666) for more details.
	 */
-	typicalP?: number;
+	typical_p?: number;
	/**
	 * Whether the model should use the past last key/values attentions to speed up decoding
	 */
-	useCache?: boolean;
+	use_cache?: boolean;
	[property: string]: unknown;
}

@@ -138,6 +138,6 @@ export interface ImageToTextOutput {
	/**
	 * The generated text.
	 */
-	imageToTextOutputGeneratedText?: string;
+	generated_text?: string;
	[property: string]: unknown;
}
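Putting the input and output of this task together, a rough sketch (import path illustrative; request plumbing omitted):

import type { ImageToTextParameters, ImageToTextOutput } from "./inference";

// Request side: wire-format key, no renaming needed before serialization.
const parameters: ImageToTextParameters = { max_new_tokens: 64 };

// Response side: generated_text matches the JSON field returned by the API.
function readCaption(output: ImageToTextOutput): string {
	return output.generated_text ?? "";
}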
