
Refactor index.py into smaller modules #2


Open · wants to merge 1 commit into base: main
10 changes: 10 additions & 0 deletions README.md
@@ -106,3 +106,13 @@ Here’s an example of how your project directory might look:
- **`.cursorrules`**: Generated at the root of your project directory.
- **`snapshot.txt`**: Contains the latest system and project snapshots.

## New Module Structure
The `index.py` file has been refactored into smaller, more focused modules:

- `config_loader.py`: Handles configuration loading and validation.
- `command_runner.py`: Handles system and project command execution.
- `snapshot_writer.py`: Handles writing snapshots to files.
- `docker_logs.py`: Handles Docker log retrieval.
- `openai_integration.py`: Handles OpenAI API interaction.

Make sure to update your imports in `index.py` to use these new modules.
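
For reference, here is a minimal sketch of how the top of the refactored `index.py` wires these modules together; it mirrors the imports added in the `index.py` diff below and adds nothing beyond them:

```python
# index.py (top) — sketch of the refactored imports and setup
from dotenv import load_dotenv

from config_loader import load_config
from command_runner import run_commands
from snapshot_writer import write_snapshot
from docker_logs import get_docker_logs
from openai_integration import generate_cursorrules

load_dotenv()            # load OPENAI_API_KEY and other environment variables
config = load_config()   # shared configuration used throughout the script
```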
39 changes: 39 additions & 0 deletions command_runner.py
@@ -0,0 +1,39 @@
import subprocess
import os
from datetime import datetime

# 'config' was a module-level global in the old index.py; load it via the
# shared loader so this module is self-contained (assumed fix).
from config_loader import load_config

config = load_config()

def run_commands(commands_list, project_dir=None):
    print(f"📊 Collecting {'project' if project_dir else 'system'} information...")
    output = []
    output.append(f"Timestamp: {datetime.utcnow().isoformat()}Z\n")

    if project_dir:
        original_dir = os.getcwd()
        base_project_path = config.get('base_project_path')
        if not base_project_path or base_project_path == "<your_project_path>":
            raise ValueError(
                "❌ Error: 'base_project_path' is not defined or is still set to '<your_project_path>' in the configuration file (config.json). "
                "Please update it to your project's root directory."
            )

        project_path = os.path.join(os.path.expanduser(base_project_path), project_dir)
        try:
            os.chdir(project_path)
            output.append(f"\n### Project: {project_dir} ###\n")
        except FileNotFoundError:
            print(f" ❌ Project directory not found: {project_path}")
            return ""

    for command in commands_list:
        print(f" ⚡ Running: {command}")
        try:
            result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
            output.append(f"{command}:\n{result.decode('utf-8')}\n")
        except subprocess.CalledProcessError as e:
            print(f" ❌ Failed: {command}")
            output.append(f"{command} (FAILED):\n{e.output.decode('utf-8')}\n")

    if project_dir:
        os.chdir(original_dir)

    return "\n".join(output)
26 changes: 26 additions & 0 deletions config_loader.py
@@ -0,0 +1,26 @@
import os
import json

def load_config():
    try:
        script_dir = os.path.dirname(os.path.abspath(__file__))
        config_file = os.path.join(script_dir, 'config.json')
        with open(config_file, 'r') as f:
            config = json.load(f)

        if 'tree' not in config or 'system_commands' not in config:
            raise KeyError("Missing required keys in config.json")

        return config
    # json.JSONDecodeError is included so a malformed config.json also falls
    # back to the defaults, matching the "not found or invalid" message.
    except (FileNotFoundError, json.JSONDecodeError, KeyError):
        print("⚠️ config.json not found or invalid, using default settings")
        return {
            "tree": {
                "max_depth": 3,
                "ignore_patterns": ["venv", "__pycache__", "node_modules", "build", "public", "dist", ".git"],
                "ignore_extensions": ["*.pyc", "*.pyo", "*.pyd", "*.so", "*.dll", "*.class"]
            },
            "system_commands": {
                "disk_usage_threshold": 80
            }
        }
36 changes: 36 additions & 0 deletions docker_logs.py
@@ -0,0 +1,36 @@
import subprocess

# 'config' supplies the optional docker.ignore_containers list; load it via
# the shared loader so this module is self-contained (assumed fix).
from config_loader import load_config

config = load_config()

def get_docker_logs():
    try:
        ignore_containers = config.get('docker', {}).get('ignore_containers', [])

        containers = subprocess.check_output(
            "docker ps --format '{{.ID}} {{.Names}}'",
            shell=True
        ).decode('utf-8').strip().split('\n')

        logs = []
        for container in containers:
            if not container:
                continue

            container_id, container_name = container.split()

            if container_name in ignore_containers:
                continue

            try:
                container_logs = subprocess.check_output(
                    f"docker logs --tail 25 {container_id}",
                    shell=True
                ).decode('utf-8')

                logs.append(f"\nDocker Logs for {container_name} ({container_id}):\n")
                logs.append(container_logs)
            except subprocess.CalledProcessError as e:
                logs.append(f"\nError getting logs for {container_name}: {str(e)}\n")

        return "\n".join(logs)
    except subprocess.CalledProcessError:
        return "Error getting Docker container list"
126 changes: 6 additions & 120 deletions index.py
@@ -1,36 +1,16 @@
import subprocess
import datetime
from dotenv import load_dotenv
import os
import json
import time
from datetime import datetime

load_dotenv()
from config_loader import load_config
from command_runner import run_commands
from snapshot_writer import write_snapshot
from docker_logs import get_docker_logs
from openai_integration import generate_cursorrules

def load_config():
try:
script_dir = os.path.dirname(os.path.abspath(__file__))
config_file = os.path.join(script_dir, 'config.json')
with open(config_file, 'r') as f:
config = json.load(f)

if 'tree' not in config or 'system_commands' not in config:
raise KeyError("Missing required keys in config.json")

return config
except (FileNotFoundError, KeyError):
print("⚠️ config.json not found or invalid, using default settings")
return {
"tree": {
"max_depth": 3,
"ignore_patterns": ["venv", "__pycache__", "node_modules", "build", "public", "dist", ".git"],
"ignore_extensions": ["*.pyc", "*.pyo", "*.pyd", "*.so", "*.dll", "*.class"]
},
"system_commands": {
"disk_usage_threshold": 80
}
}
load_dotenv()

config = load_config()

@@ -70,100 +50,6 @@ def get_project_directories():
print("❌ Error: containers-list.md not found in .cursorboost directory")
return []

def run_commands(commands_list, project_dir=None):
print(f"📊 Collecting {'project' if project_dir else 'system'} information...")
output = []
output.append(f"Timestamp: {datetime.utcnow().isoformat()}Z\n")

if project_dir:
original_dir = os.getcwd()
base_project_path = config.get('base_project_path')
if not base_project_path or base_project_path == "<your_project_path>":
raise ValueError(
"❌ Error: 'base_project_path' is not defined or is still set to '<your_project_path>' in the configuration file (config.json). "
"Please update it to your project's root directory."
)

project_path = os.path.join(os.path.expanduser(base_project_path), project_dir)
try:
os.chdir(project_path)
output.append(f"\n### Project: {project_dir} ###\n")
except FileNotFoundError:
print(f" ❌ Project directory not found: {project_path}")
return ""

for command in commands_list:
print(f" ⚡ Running: {command}")
try:
result = subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)
output.append(f"{command}:\n{result.decode('utf-8')}\n")
except subprocess.CalledProcessError as e:
print(f" ❌ Failed: {command}")
output.append(f"{command} (FAILED):\n{e.output.decode('utf-8')}\n")

if project_dir:
os.chdir(original_dir)

return "\n".join(output)

def write_snapshot(snapshot):
with open("snapshot.txt", "w") as f:
f.write(snapshot)

import openai

def generate_cursorrules(snapshot):
api_key = os.getenv('OPENAI_API_KEY')
if not api_key:
print("❌ Error: OPENAI_API_KEY not found in environment variables")
return None

client = openai.OpenAI(api_key=api_key)

response = client.chat.completions.create(
model="chatgpt-4o-latest",
messages=[
{"role": "system", "content": "You are a helpful assistant that injects a user's system information and parses out the most important details. Only respond with the complete file contents, without any additional explanation or commentary."},
{"role": "user", "content": f"You are a coding assistant optimizing text files for LLM-based applications. Using the following system snapshot, generate a text file that highlights the most relevant details for coding context. Only output the complete file contents, without explanations or extra text:\n\n{snapshot}"}
],
temperature=1,
)
return response.choices[0].message.content

def get_docker_logs():
try:
ignore_containers = config.get('docker', {}).get('ignore_containers', [])

containers = subprocess.check_output(
"docker ps --format '{{.ID}} {{.Names}}'",
shell=True
).decode('utf-8').strip().split('\n')

logs = []
for container in containers:
if not container:
continue

container_id, container_name = container.split()

if container_name in ignore_containers:
continue

try:
container_logs = subprocess.check_output(
f"docker logs --tail 25 {container_id}",
shell=True
).decode('utf-8')

logs.append(f"\nDocker Logs for {container_name} ({container_id}):\n")
logs.append(container_logs)
except subprocess.CalledProcessError as e:
logs.append(f"\nError getting logs for {container_name}: {str(e)}\n")

return "\n".join(logs)
except subprocess.CalledProcessError as e:
return "Error getting Docker container list"

def write_cursorrules(cursorrules):
project_description = ""
try:
20 changes: 20 additions & 0 deletions openai_integration.py
@@ -0,0 +1,20 @@
import os
import openai

def generate_cursorrules(snapshot):
    api_key = os.getenv('OPENAI_API_KEY')
    if not api_key:
        print("❌ Error: OPENAI_API_KEY not found in environment variables")
        return None

    client = openai.OpenAI(api_key=api_key)

    response = client.chat.completions.create(
        model="chatgpt-4o-latest",
        messages=[
            {"role": "system", "content": "You are a helpful assistant that injects a user's system information and parses out the most important details. Only respond with the complete file contents, without any additional explanation or commentary."},
            {"role": "user", "content": f"You are a coding assistant optimizing text files for LLM-based applications. Using the following system snapshot, generate a text file that highlights the most relevant details for coding context. Only output the complete file contents, without explanations or extra text:\n\n{snapshot}"}
        ],
        temperature=1,
    )
    return response.choices[0].message.content
5 changes: 5 additions & 0 deletions snapshot_writer.py
@@ -0,0 +1,5 @@
def write_snapshot(snapshot):
    with open("snapshot.txt", "w") as f:
        f.write(snapshot)