54 changes: 38 additions & 16 deletions deepgram/clients/analyze/v1/options.py
@@ -2,8 +2,8 @@
# Use of this source code is governed by a MIT license that can be found in the LICENSE file.
# SPDX-License-Identifier: MIT

from dataclasses import dataclass
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json, config

from io import BufferedReader
from typing import Union, Optional
@@ -20,17 +20,39 @@ class AnalyzeOptions:
https://developers.deepgram.com/reference/text-intelligence-apis
"""

callback: Optional[str] = None
callback_method: Optional[str] = None
custom_intent: Optional[Union[list, str]] = None
custom_intent_mode: Optional[str] = None
custom_topic: Optional[Union[list, str]] = None
custom_topic_mode: Optional[str] = None
intents: Optional[bool] = None
language: Optional[str] = None
sentiment: Optional[bool] = None
summarize: Optional[bool] = None
topics: Optional[bool] = None
callback: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
callback_method: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
custom_intent: Optional[Union[list, str]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
custom_intent_mode: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
custom_topic: Optional[Union[list, str]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
custom_topic_mode: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
intents: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
language: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
sentiment: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
summarize: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
topics: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)

def __getitem__(self, key):
_dict = self.to_dict()
@@ -69,7 +91,7 @@ class StreamSource:
stream (BufferedReader): A BufferedReader object for reading binary data.
"""

stream: BufferedReader
stream: BufferedReader = None

def __getitem__(self, key):
_dict = self.to_dict()
@@ -95,7 +117,7 @@ class UrlSource:
url (str): The URL pointing to the hosted file.
"""

url: str
url: str = ""

def __getitem__(self, key):
_dict = self.to_dict()
@@ -121,7 +143,7 @@ class BufferSource:
buffer (bytes): The binary data.
"""

buffer: bytes
buffer: bytes = b""

def __getitem__(self, key):
_dict = self.to_dict()
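The repeated `field(default=None, metadata=config(exclude=lambda f: f is None))` pattern above tells `dataclasses_json` to skip any field still set to `None` when the options object is serialized, so unset options stay out of the request instead of being emitted as `null`. A minimal sketch of that effect, with illustrative field values that are not taken from this PR:

```python
from dataclasses import dataclass, field
from typing import Optional

from dataclasses_json import config, dataclass_json


@dataclass_json
@dataclass
class ExampleOptions:
    # Mirrors the AnalyzeOptions pattern: fields left at None are excluded
    # from the serialized output.
    summarize: Optional[bool] = field(
        default=None, metadata=config(exclude=lambda f: f is None)
    )
    language: Optional[str] = field(
        default=None, metadata=config(exclude=lambda f: f is None)
    )


opts = ExampleOptions(summarize=True)
print(opts.to_dict())  # {'summarize': True} -- 'language' is omitted entirely
print(opts.to_json())  # {"summarize": true}
```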
70 changes: 30 additions & 40 deletions deepgram/clients/analyze/v1/response.py
@@ -14,7 +14,7 @@
@dataclass_json
@dataclass
class AsyncAnalyzeResponse:
request_id: Optional[str] = ""
request_id: str = ""

def __getitem__(self, key):
_dict = self.to_dict()
@@ -30,9 +30,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class IntentsInfo:
model_uuid: Optional[str] = ""
input_tokens: Optional[int] = 0
output_tokens: Optional[int] = 0
model_uuid: str = ""
input_tokens: int = 0
output_tokens: int = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -45,9 +45,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class SentimentInfo:
model_uuid: Optional[str] = ""
input_tokens: Optional[int] = 0
output_tokens: Optional[int] = 0
model_uuid: str = ""
input_tokens: int = 0
output_tokens: int = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -60,9 +60,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class SummaryInfo:
model_uuid: Optional[str] = ""
input_tokens: Optional[int] = 0
output_tokens: Optional[int] = 0
model_uuid: str = ""
input_tokens: int = 0
output_tokens: int = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -75,9 +75,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class TopicsInfo:
model_uuid: Optional[str] = ""
input_tokens: Optional[int] = 0
output_tokens: Optional[int] = 0
model_uuid: str = ""
input_tokens: int = 0
output_tokens: int = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -90,9 +90,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Metadata:
request_id: Optional[str] = ""
created: Optional[str] = ""
language: Optional[str] = ""
request_id: str = ""
created: str = ""
language: str = ""
intents_info: Optional[IntentsInfo] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
@@ -125,10 +125,8 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Average:
sentiment: Optional[Sentiment] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
sentiment_score: Optional[float] = 0
sentiment: Sentiment = None
sentiment_score: float = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -143,7 +141,7 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Summary:
text: Optional[str] = ""
text: str = ""

def __getitem__(self, key):
_dict = self.to_dict()
@@ -156,8 +154,8 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Topic:
topic: Optional[str] = ""
confidence_score: Optional[float] = 0
topic: str = ""
confidence_score: float = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -170,8 +168,8 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Intent:
intent: Optional[str] = ""
confidence_score: Optional[float] = 0
intent: str = ""
confidence_score: float = 0

def __getitem__(self, key):
_dict = self.to_dict()
@@ -184,9 +182,9 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Segment:
text: Optional[str] = ""
start_word: Optional[int] = 0
end_word: Optional[int] = 0
text: str = ""
start_word: int = 0
end_word: int = 0
sentiment: Optional[Sentiment] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
@@ -215,12 +213,8 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Sentiments:
segments: Optional[List[Segment]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
average: Optional[Average] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
segments: List[Segment] = None
average: Average = None

def __getitem__(self, key):
_dict = self.to_dict()
@@ -239,9 +233,7 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Topics:
segments: Optional[List[Segment]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
segments: List[Segment] = None

def __getitem__(self, key):
_dict = self.to_dict()
@@ -258,9 +250,7 @@ def __str__(self) -> str:
@dataclass_json
@dataclass
class Intents:
segments: Optional[List[Segment]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
segments: List[Segment] = None

def __getitem__(self, key):
_dict = self.to_dict()
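In the response models, fields that always carry a concrete value keep a plain default (`str = ""`, `int = 0`) and drop `Optional`, while genuinely optional nested objects keep the exclude-on-`None` metadata. A rough sketch of how `dataclasses_json` handles a partial payload under that scheme; the classes and payload below are simplified stand-ins, not the SDK's own:

```python
from dataclasses import dataclass, field
from typing import Optional

from dataclasses_json import config, dataclass_json


@dataclass_json
@dataclass
class InfoExample:
    # Same shape as IntentsInfo / SentimentInfo / SummaryInfo / TopicsInfo.
    model_uuid: str = ""
    input_tokens: int = 0
    output_tokens: int = 0


@dataclass_json
@dataclass
class MetadataExample:
    request_id: str = ""
    # Optional nested block, left out of to_dict() when absent.
    intents_info: Optional[InfoExample] = field(
        default=None, metadata=config(exclude=lambda f: f is None)
    )


# Keys missing from the payload fall back to the field defaults.
meta = MetadataExample.from_dict({"request_id": "1234"})
print(meta.request_id)    # '1234'
print(meta.intents_info)  # None
print(meta.to_dict())     # {'request_id': '1234'} -- intents_info is excluded
```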
116 changes: 86 additions & 30 deletions deepgram/clients/live/v1/options.py
@@ -2,8 +2,8 @@
# Use of this source code is governed by a MIT license that can be found in the LICENSE file.
# SPDX-License-Identifier: MIT

from dataclasses import dataclass
from dataclasses_json import dataclass_json
from dataclasses import dataclass, field
from dataclasses_json import dataclass_json, config
from typing import List, Optional, Union
import logging, verboselogs

@@ -18,34 +18,90 @@ class LiveOptions:
https://developers.deepgram.com/reference/streaming
"""

alternatives: Optional[int] = None
callback: Optional[str] = None
callback_method: Optional[str] = None
channels: Optional[int] = None
diarize: Optional[bool] = None
diarize_version: Optional[str] = None
encoding: Optional[str] = None
endpointing: Optional[str] = None
extra: Optional[Union[list, str]] = None
filler_words: Optional[bool] = None
interim_results: Optional[bool] = None
keywords: Optional[str] = None
language: Optional[str] = None
model: Optional[str] = None
multichannel: Optional[bool] = None
numerals: Optional[bool] = None
punctuate: Optional[bool] = None
profanity_filter: Optional[bool] = None
redact: Optional[bool] = None
replace: Optional[str] = None
sample_rate: Optional[int] = None
search: Optional[str] = None
smart_format: Optional[bool] = None
tag: Optional[list] = None
tier: Optional[str] = None
utterance_end_ms: Optional[str] = None
vad_events: Optional[bool] = None
version: Optional[str] = None
alternatives: Optional[int] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
callback: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
callback_method: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
channels: Optional[int] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
diarize: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
diarize_version: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
encoding: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
endpointing: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
extra: Optional[Union[list, str]] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
filler_words: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
interim_results: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
keywords: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
language: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
model: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
multichannel: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
numerals: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
punctuate: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
profanity_filter: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
redact: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
replace: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
sample_rate: Optional[int] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
search: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
smart_format: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
tag: Optional[list] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
tier: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
utterance_end_ms: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
vad_events: Optional[bool] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)
version: Optional[str] = field(
default=None, metadata=config(exclude=lambda f: f is None)
)

def __getitem__(self, key):
_dict = self.to_dict()
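`LiveOptions` gets the same treatment, which matters because streaming options ultimately travel as URL query parameters: excluding `None` keeps unset options out of the query string rather than sending literal `None` values. A hedged illustration of that effect, assuming the package path mirrors the file in this diff and without claiming this is how the client actually builds its URL (the real client may format values, e.g. booleans, differently):

```python
from urllib.parse import urlencode

# Assumes the module path matches deepgram/clients/live/v1/options.py.
from deepgram.clients.live.v1.options import LiveOptions

options = LiveOptions(model="nova-2", punctuate=True, interim_results=True)

# Only explicitly set fields survive to_dict(), so only they reach the query.
params = options.to_dict()
print(urlencode(params))
# e.g. model=nova-2&punctuate=True&interim_results=True
```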