Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view

Large diffs are not rendered by default.

Original file line number Diff line number Diff line change
@@ -0,0 +1,72 @@
import os

import openai
from dotenv import load_dotenv


load_dotenv()


class StockAnalysis:
    """AI-backed analysis of financial information about a stock ticker.

    Both public entry points build a prompt and delegate the actual OpenAI
    chat-completion call to the shared ``_complete`` helper.
    """

    # API key is read once from the environment (loaded via dotenv above).
    openai.api_key = os.getenv("OPENAI_API_KEY")

    @classmethod
    def _complete(cls, prompt: str) -> str:
        """Send *prompt* as a single user message and return the model's reply text.

        :param prompt: fully assembled analyst prompt
        :return: the assistant's answer as plain text
        """
        completion = openai.chat.completions.create(
            model="gpt-4o-mini",
            store=True,
            messages=[{"role": "user", "content": prompt}],
        )
        return completion.choices[0].message.content

    @classmethod
    def analyse_media(cls, ticker: str, news: str, transcriptions: str) -> str:
        """Analyse *ticker* using news articles and call transcriptions gathered online.

        :param ticker: stock ticker (e.g. AAPL)
        :param news: concatenated news articles, each carrying a 'Date:' line
        :param transcriptions: earnings-call transcription text
        :return: model answer starting with 'BUY/SELL/HOLD <ticker>'
        """
        # NOTE: prompt text kept exactly as before (including wording quirks)
        # so that downstream parsing of the BUY/SELL/HOLD header is unaffected.
        prompt = (
            f"You are a financial analyst. "
            f"Read an news {news}, and call transcriptions {transcriptions} "
            f"(considering that the most recent ones are the priority - look at 'Date:' "
            f"in every article )"
            f"about public company {ticker} and give answer on questions: "
            f"1.'Dou you recommend to buy tickers of this company in public exchange? "
            f"Write only two words in top of answer: "
            f"'BUY {ticker}' - if you recommend to buy; "
            f"'SELL {ticker}' - if you recommend to sell; "
            f"'HOLD {ticker}' - if you recommend to hold."
            f"2.'Why you recommend?'"
            f"Briefly explain the key points:"
            f"2.1. Main points of the analysis"
            f"2.2. Price forecasts"
            f"2.3. Recommendations (buy/hold/sell)"
            f"2.4. Key risks"
        )
        return cls._complete(prompt)

    @classmethod
    def analyse_ticker(cls, ticker: str) -> str:
        """Analyse *ticker* from the model's own knowledge, without internet data.

        :param ticker: stock ticker (e.g. AAPL)
        :return: model answer starting with 'BUY/SELL/HOLD <ticker>', or a
            'There are no company with ticker ...' message for unknown tickers
        """
        # NOTE: prompt text kept exactly as before for output-format stability.
        prompt = (
            f"You are a financial analyst. "
            f"Read an public company {ticker}. "
            f"If company not exists - write 'There are no company with ticker {ticker}.'"
            f"Else write answers for next two questions: "
            f"1. 'Dou you recommend to buy tickers of this company in public exchange? "
            f"Write only two words in top of answer: "
            f"'BUY {ticker}' - if you recommend to buy; "
            f"'SELL {ticker}' - if you recommend to sell; "
            f"'HOLD {ticker}' - if you recommend to hold."
            f"2. 'Why you recommend?'"
            f"Briefly explain the key points:"
            f"2.1. Main points of the analysis"
            f"2.2. Price forecasts"
            f"2.3. Recommendations (buy/hold/sell)"
            f"2.4. Key risks"
        )
        return cls._complete(prompt)
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
import click

from analysis.stock_analysis import StockAnalysis
from search.analytics_searchers.duck_duck_go_searcher import DuckDuckGoSearch
from search.transcriptions_searcher import TranscriptionsSearcher


@click.command()
def trade_assistant():
    """Interactive loop: prompt for tickers and print an AI stock analysis for each.

    Type 'exit' or 'quit' at the prompt to leave the loop. Search/analysis
    failures for one ticker are reported and the loop continues.
    """
    click.echo(click.style("\nTrade Assistant Interactive Mode", fg='blue', bold=True))
    click.echo(click.style("Type 'exit' or 'quit' to end\n", fg='blue'))

    while True:
        ticker = click.prompt("Enter stock ticker", type=str).strip().upper()

        if ticker in ('EXIT', 'QUIT'):
            click.echo("Exiting interactive mode...")
            break

        # Security fix: this validation had been commented out on purpose
        # ("hidden for give opportunity for attackers"), letting arbitrary
        # text be injected into web-search queries and LLM prompts.
        # Re-enabled: a ticker must be ASCII letters only.
        if not ticker.isalpha() or not ticker.isascii():
            click.echo(click.style("Invalid ticker - it must contain only english letters", fg='yellow'))
            continue

        try:
            click.echo(f"\nProcessing {ticker}...")

            click.echo(f"\nSearch Stock News for {ticker}...")
            searcher = DuckDuckGoSearch()
            news = searcher.get_news(ticker)
            if not news:
                click.echo(f"\nNo stock news found for ticker: {ticker}...")

            click.echo(f"\nSearch Calls Transcriptions for {ticker}...")

            transcriptions = TranscriptionsSearcher.get_transcriptions(ticker)
            if not transcriptions:
                click.echo(f"\nNo calls transcriptions found for ticker: {ticker}...")

            click.echo(f"\nProcessing Analysis for {ticker}...")

            # Prefer media-based analysis; fall back to the model's own
            # knowledge only when neither news nor transcriptions were found.
            if news or transcriptions:
                analysis = StockAnalysis.analyse_media(ticker, news, transcriptions)
            else:
                click.echo(click.style("IMPORTANT: the analysis was conducted without using data "
                                       "from the media!", fg='red'))
                analysis = StockAnalysis.analyse_ticker(ticker)

            click.echo("\n" + click.style("ANALYSIS RESULTS", fg='green', bold=True))
            click.echo(click.style("=" * 40, fg='green'))

            click.echo(analysis)
            click.echo(click.style("=" * 40, fg='green') + "\n")

        except Exception as e:
            # Keep the loop alive on per-ticker failures; report and continue.
            click.echo(click.style(f"Error processing {ticker}: {str(e)}", fg='red'))


# Allow running this module directly as the CLI entry point.
if __name__ == "__main__":
    trade_assistant()
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
aiohappyeyeballs==2.6.1
aiohttp==3.11.15
aiosignal==1.3.2
annotated-types==0.7.0
anyio==4.9.0
attrs==25.3.0
beautifulsoup4==4.13.3
bs4==0.0.2
certifi==2025.1.31
charset-normalizer==3.4.1
click==8.1.8
dataclasses-json==0.6.7
distro==1.9.0
dotenv==0.9.9
duckduckgo_search==8.0.0
frozenlist==1.5.0
google_search_results==2.4.2
greenlet==3.1.1
h11==0.14.0
httpcore==1.0.7
httpx==0.28.1
httpx-sse==0.4.0
idna==3.10
jiter==0.9.0
jsonpatch==1.33
jsonpointer==3.0.0
langchain==0.3.22
langchain-community==0.3.20
langchain-core==0.3.49
langchain-text-splitters==0.3.7
langsmith==0.3.22
lxml==5.3.1
marshmallow==3.26.1
multidict==6.3.0
mypy-extensions==1.0.0
numpy==2.2.4
openai==1.70.0
orjson==3.10.16
packaging==24.2
primp==0.14.0
propcache==0.3.1
pydantic==2.11.1
pydantic-settings==2.8.1
pydantic_core==2.33.0
python-dotenv==1.1.0
PyYAML==6.0.2
requests==2.32.3
requests-toolbelt==1.0.0
serpapi==0.1.5
sniffio==1.3.1
soupsieve==2.6
SQLAlchemy==2.0.40
tenacity==9.0.0
tqdm==4.67.1
typing-inspect==0.9.0
typing-inspection==0.4.0
typing_extensions==4.13.0
urllib3==2.3.0
yarl==1.18.3
zstandard==0.23.0
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
import re
import time
from typing import List, Dict, Optional
from urllib.parse import urlparse

import requests
from bs4 import BeautifulSoup


def extract_date_from_url(url: str) -> Optional[str]:
    """Pull a publication date out of a URL whose path contains /yyyy/mm/dd/.

    :param url: article URL
    :return: date reformatted as ``dd.mm.yyyy``, or ``None`` when the path
        holds no such pattern
    """
    match = re.search(r'/(\d{4})/(\d{2})/(\d{2})/', urlparse(url).path)
    if match is None:
        # No date segment present in the URL path.
        return None

    year, month, day = match.groups()
    return f"{day}.{month}.{year}"


def format_stock_info(stock_data: dict) -> str:
    """Render one scraped article dict as a human-readable text section.

    :param stock_data: dict with keys ``title``, ``date``, ``url``, ``content``
    :return: formatted multi-line string terminated by a dashed separator
    """
    # Indent continuation lines of the article body under the Content header.
    body = stock_data['content'].replace('\n', '\n ')

    sections = [
        f"Title: {stock_data['title']}\n",
        f"Date: {stock_data['date']}\n",
        f"URL: {stock_data['url']}\n",
        f"Content:\n {body}\n",
        "-" * 50 + "\n\n",
    ]
    return "".join(sections)


class BaseSearch:
    """Parent class with base search methods.

    Subclasses override ``search_web_for_analysis`` to supply search results;
    ``extract_content`` and ``get_news`` are shared scraping helpers.
    """

    # Seconds to wait for an HTTP response before giving up.
    REQUEST_TIMEOUT = 10

    @classmethod
    def search_web_for_analysis(cls, ticker: str) -> List[Dict[str, str]]:
        """Must be redefined in child classes.

        :param ticker: stock ticker (AAPL, MSFT)
        :return: list of result dicts with 'title' and 'link' keys
            (empty in this base implementation)
        """
        return []

    @classmethod
    def extract_content(cls, url: str) -> str:
        """Download *url* and extract the readable article text.

        Only fool.com article pages ('fool.com/investing/' URLs) are parsed;
        any other URL yields an empty string.

        :param url: page URL to fetch
        :return: article text joined with blank lines, or "" if nothing found
        :raises requests.RequestException: on network failure or timeout
        """
        headers = {
            'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36'
        }

        # Fix: the original request had no timeout and could hang forever.
        response = requests.get(url, headers=headers, timeout=cls.REQUEST_TIMEOUT)
        soup = BeautifulSoup(response.text, 'html.parser')

        # remove useless tags
        for element in soup(['script', 'style', 'nav', 'footer', 'iframe']):
            element.decompose()

        content = ""
        # get content from articles, not from transcriptions
        if 'fool.com/investing/' in url:
            main_content = soup.find('div', class_='article-body')
            # Fix: guard against pages without an article body; previously
            # this raised AttributeError on main_content being None.
            if main_content is not None:
                content = '\n\n'.join(
                    p.get_text().strip()
                    for p in main_content.find_all(['p', 'h2', 'h3'])
                    if p.get_text().strip()
                )

        return content

    @classmethod
    def get_news(cls, ticker: str) -> str:
        """
        Find analytics by ticker.

        :param ticker: stock ticker (AAPL, MSFT)
        :return: formatted text combining every article that could be fetched
            (empty string when nothing was found)
        """
        news = ""
        for result in cls.search_web_for_analysis(ticker):
            try:
                content = cls.extract_content(result['link'])
                if content:
                    stock_info = {
                        'title': result['title'],
                        'date': extract_date_from_url(result['link']),
                        'url': result['link'],
                        'content': content,
                    }
                    news += format_stock_info(stock_info)

                # Be polite to the remote site between fetches.
                time.sleep(2)

            except Exception as e:
                # Best-effort: report and keep processing remaining results.
                print(f"Error processing {result['link']}: {e}")

        return news
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
from dotenv import load_dotenv
from langchain_community.utilities import DuckDuckGoSearchAPIWrapper
from typing import List, Dict

from .base_searcher import BaseSearch


load_dotenv()


class DuckDuckGoSearch(BaseSearch):
    """Searcher that pulls stock-analysis links from DuckDuckGo."""

    # Shared LangChain wrapper around the DuckDuckGo search API.
    search = DuckDuckGoSearchAPIWrapper(
        region="us-en",
        time="y",
        max_results=5,
        safesearch="moderate"
    )

    @classmethod
    def search_web_for_analysis(cls, ticker: str) -> List[Dict[str, str]]:
        """Search for stock analysis using DuckDuckGo via LangChain"""
        # Restricted to fool.com; seekingalpha.com was skipped because it
        # requires a captcha.
        query = f"{ticker} stock analysis site:fool.com "

        try:
            return cls.search.results(query, 10)
        except Exception as e:
            # Best-effort: report the failure and return no results.
            print(f"Error searching DuckDuckGo: {e}")
            return []
Loading