214 changes: 214 additions & 0 deletions insecure-api/main-2.py
@@ -0,0 +1,214 @@
from fastapi import FastAPI, HTTPException, Header, Request
from typing import Optional
from models import VideoGame, User
from database import video_games, users
import sqlite3
import os
import requests
from fastapi.responses import RedirectResponse, HTMLResponse  # HTMLResponse is used by the /feedback endpoint below

app = FastAPI(
    title="Intentionally Insecure Video Game API",
    description="An API designed for security education, demonstrating common vulnerabilities.",
    version="1.0.0",
    contact={
        "name": "Your Name",
        "email": "[email protected]",
    },
)

# Initialize the SQLite database
def init_db():
    if not os.path.exists('videogames.db'):
        conn = sqlite3.connect('videogames.db')
        cursor = conn.cursor()
        # Create table
        cursor.execute('''
            CREATE TABLE video_games (
                id INTEGER PRIMARY KEY,
                title TEXT NOT NULL,
                developer TEXT NOT NULL,
                publisher TEXT NOT NULL,
                year_published INTEGER NOT NULL,
                sales INTEGER NOT NULL
            )
        ''')
        # Insert data
        for game in video_games:
            cursor.execute('''
                INSERT INTO video_games (id, title, developer, publisher, year_published, sales)
                VALUES (?, ?, ?, ?, ?, ?)
            ''', (game.id, game.title, game.developer, game.publisher, game.year_published, game.sales))
        conn.commit()
        conn.close()

# Call the init_db function when the app starts
@app.on_event("startup")
def startup_event():
    init_db()

# Public endpoint to get basic video game info
@app.get("/games")
def get_games():
    return video_games

# Vulnerable endpoint: No authentication required to get sensitive sales data
@app.get("/games/{game_id}/sales")
def get_game_sales(game_id: int):
    # Vulnerability: No authentication or authorization checks (API1:2019 - Broken Object Level Authorization)
    for game in video_games:
        if game.id == game_id:
            return {"title": game.title, "sales": game.sales}
    raise HTTPException(status_code=404, detail="Game not found")

# Vulnerable endpoint: Weak authentication and improper authorization
@app.post("/games")
def add_game(game: VideoGame, Authorization: Optional[str] = Header(None)):
    # Vulnerability: Token sent in Authorization header without proper validation (API2:2019 - Broken Authentication)
    if not Authorization:
        raise HTTPException(status_code=401, detail="Authorization header required")

    # Extract Bearer token
    if not Authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid Authorization header format")
    token = Authorization.split(" ")[1]

    # Vulnerability: Insecure token handling and authorization (API5:2019 - Broken Function Level Authorization)
    for user in users:
        if user.token == token:
            if user.is_admin:
                video_games.append(game)
                return {"message": "Game added"}
            else:
                raise HTTPException(status_code=403, detail="Not authorized")
    raise HTTPException(status_code=401, detail="Invalid token")

# Vulnerable endpoint: Exposes sensitive user data
@app.get("/users")
def get_users():
    # Vulnerability: Exposes tokens and admin status (API3:2019 - Excessive Data Exposure)
    return users

# Additional vulnerable endpoint: No rate limiting implemented
@app.post("/login")
def login(username: str):
    # Vulnerability: No rate limiting allows brute-force attacks (API4:2019 - Lack of Resources & Rate Limiting)
    for user in users:
        if user.username == username:
            return {"token": user.token}
    raise HTTPException(status_code=404, detail="User not found")

# Additional vulnerable endpoint: Mass assignment
@app.put("/games/{game_id}")
def update_game(game_id: int, updated_game: VideoGame):
    # Vulnerability: Mass assignment allows overwriting of unintended fields (API6:2019 - Mass Assignment)
    for i, game in enumerate(video_games):
        if game.id == game_id:
            video_games[i] = updated_game
            return {"message": "Game updated"}
    raise HTTPException(status_code=404, detail="Game not found")

# Additional vulnerable endpoint: SQL Injection
@app.get("/search")
def search_games(query: str):
    # Vulnerability: User input is not sanitized (API8:2019 - Injection)
    conn = sqlite3.connect('videogames.db')
    cursor = conn.cursor()
    try:
        sql_query = f"SELECT * FROM video_games WHERE title = '{query}'"
        cursor.execute(sql_query)

Check failure

Code scanning / CodeQL

SQL query built from user-controlled sources (High)

This SQL query depends on a user-provided value.

Copilot Autofix

AI 7 months ago

To fix the problem, we should use parameterized queries instead of directly embedding user input into the SQL query string. Parameterized queries ensure that user input is treated as data rather than executable code, thus preventing SQL injection attacks.

In the provided code snippet, we need to modify the SQL query construction and execution to use parameterized queries. Specifically, we will replace the f-string formatted query with a parameterized query using placeholders and pass the user input as a parameter to the execute method.

Suggested changeset 1
insecure-api/main-2.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main-2.py b/insecure-api/main-2.py
--- a/insecure-api/main-2.py
+++ b/insecure-api/main-2.py
@@ -117,4 +117,4 @@
     try:
-        sql_query = f"SELECT * FROM video_games WHERE title = '{query}'"
-        cursor.execute(sql_query)
+        sql_query = "SELECT * FROM video_games WHERE title = ?"
+        cursor.execute(sql_query, (query,))
         rows = cursor.fetchall()
EOF

Security control: Static Code Analysis Python Semgrep

SQLAlchemy raw SQL query concatenation risks SQL Injection

Avoiding SQL string concatenation: untrusted input concatenated with a raw SQL query can result in SQL injection. To execute a raw query safely, a prepared statement should be used. SQLAlchemy provides TextualSQL to easily use prepared statements with named parameters. For complex SQL composition, use the SQL Expression Language or Schema Definition Language. In most cases, the SQLAlchemy ORM will be a better option.

Severity: HIGH

Learn more about this issue
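For reference, a minimal sketch of the prepared-statement approach this rule describes, using SQLAlchemy's TextualSQL (text()) with a named parameter; the engine URL, table name, and helper function name are assumptions based on this repository's SQLite database, not part of the finding:

from sqlalchemy import create_engine, text

# Assumed engine URL pointing at the app's SQLite database.
engine = create_engine("sqlite:///videogames.db")

def search_games_safe(query: str):
    # The named parameter ":title" keeps user input bound as data, never as SQL text.
    stmt = text("SELECT * FROM video_games WHERE title = :title")
    with engine.connect() as conn:
        return conn.execute(stmt, {"title": query}).fetchall()

For plain sqlite3, the equivalent is the cursor.execute(sql_query, (query,)) form shown in the autofix patch above.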




Semgrep identified an issue in your code:

Avoiding SQL string concatenation: untrusted input concatenated with a raw SQL query can result in SQL injection. To execute a raw query safely, a prepared statement should be used. SQLAlchemy provides TextualSQL to easily use prepared statements with named parameters. For complex SQL composition, use the SQL Expression Language or Schema Definition Language. In most cases, the SQLAlchemy ORM will be a better option.

To resolve this comment:

✨ Commit Assistant fix suggestion

Suggested change
cursor.execute(sql_query)
sql_query = "SELECT * FROM video_games WHERE title = ?"
cursor.execute(sql_query, (query,))
View step-by-step instructions
  1. Change the SQL query to use parameterized queries to prevent SQL injection. Replace the line sql_query = f"SELECT * FROM video_games WHERE title = '{query}'" with sql_query = "SELECT * FROM video_games WHERE title = ?".
  2. Pass the query parameter as a tuple to the execute method. Update the cursor.execute(sql_query) line to cursor.execute(sql_query, (query,)).

This change uses SQLite's parameterized query feature, which automatically handles escaping and prevents SQL injection attacks.

Alternatively, triage in Semgrep AppSec Platform to ignore the finding created by sqlalchemy-execute-raw-query.


Semgrep identified an issue in your code:

Detected possible formatted SQL query. Use parameterized queries instead.

To resolve this comment:

🔧 No guidance has been designated for this issue. Fix according to your organization's approved methods.

Alternatively, triage in Semgrep AppSec Platform to ignore the finding created by formatted-sql-query.

        rows = cursor.fetchall()
    except Exception as e:
        # Return the exception message for educational purposes (not recommended in production)
        return {"error": str(e)}

Check warning

Code scanning / CodeQL

Information exposure through an exception (Medium)

Stack trace information flows to this location and may be exposed to an external user.

Copilot Autofix

AI 7 months ago

To fix the problem, we should avoid returning the detailed exception message to the user. Instead, we should log the exception on the server and return a generic error message to the user. This approach ensures that sensitive information is not exposed while still allowing developers to debug issues using the server logs.

  • Modify the code to log the exception message instead of returning it to the user.
  • Return a generic error message to the user indicating that an internal error has occurred.
Suggested changeset 1
insecure-api/main-2.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main-2.py b/insecure-api/main-2.py
--- a/insecure-api/main-2.py
+++ b/insecure-api/main-2.py
@@ -121,4 +121,5 @@
     except Exception as e:
-        # Return the exception message for educational purposes (not recommended in production)
-        return {"error": str(e)}
+        # Log the exception message and return a generic error message to the user
+        print(f"Exception occurred: {e}")  # Replace with proper logging in production
+        return {"error": "An internal error has occurred. Please try again later."}
     finally:
EOF
    finally:
        conn.close()
    # Convert rows to list of dictionaries
    results = []
    for row in rows:
        results.append({
            "id": row[0],
            "title": row[1],
            "developer": row[2],
            "publisher": row[3],
            "year_published": row[4],
            "sales": row[5],
        })
    return results

# Additional vulnerable endpoint: Improper assets management
@app.get("/.env")
def get_env():
    # Vulnerability: Sensitive files are exposed (API9:2019 - Improper Assets Management)
    return {"SECRET_KEY": "supersecretkey"}

# Additional vulnerable endpoint: Insufficient logging and monitoring
@app.post("/admin/delete_game")
def delete_game(game_id: int, Authorization: Optional[str] = Header(None)):
    # Vulnerability: Actions are not logged (API10:2019 - Insufficient Logging & Monitoring)
    if not Authorization:
        raise HTTPException(status_code=401, detail="Authorization header required")

    # Extract Bearer token
    if not Authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid Authorization header format")
    token = Authorization.split(" ")[1]

    for user in users:
        if user.token == token and user.is_admin:
            for i, game in enumerate(video_games):
                if game.id == game_id:
                    deleted_game = video_games.pop(i)
                    # No logging of the deletion action
                    return {"message": f"Game '{deleted_game.title}' deleted"}
            raise HTTPException(status_code=404, detail="Game not found")
    raise HTTPException(status_code=403, detail="Not authorized")

@app.post("/feedback")
def submit_feedback(feedback: str):
    # Vulnerability: User input is not sanitized before rendering (API7:2019 - Security Misconfiguration)
    response = HTMLResponse(content=f"<html><body><h1>Feedback Received</h1><p>{feedback}</p></body></html>")
    return response

# Additional vulnerable endpoint: Insecure Direct Object References (IDOR)
@app.get("/user_profile")
def get_user_profile(user_id: int):
    # Vulnerability: No authorization checks (API1:2019 - Broken Object Level Authorization)
    for user in users:
        if user.username == f"user{user_id}":
            return user
    raise HTTPException(status_code=404, detail="User not found")

# Additional vulnerable endpoint: Cross-Site Request Forgery (CSRF)
@app.post("/update_profile")
def update_profile(username: str, email: str, Authorization: Optional[str] = Header(None)):
    # Vulnerability: No CSRF protection (API5:2019 - Broken Function Level Authorization)
    if not Authorization:
        raise HTTPException(status_code=401, detail="Authorization header required")
    # Extract Bearer token
    if not Authorization.startswith("Bearer "):
        raise HTTPException(status_code=401, detail="Invalid Authorization header format")
    token = Authorization.split(" ")[1]
    # Simulate updating user profile
    for user in users:
        if user.token == token:
            user.username = username
            user.email = email  # Assuming 'email' field exists in User model
            return {"message": "Profile updated"}
    raise HTTPException(status_code=401, detail="Invalid token")

# Additional vulnerable endpoint: Server-Side Request Forgery (SSRF)
@app.get("/fetch_url")
def fetch_url_content(url: str):
    # Vulnerability: No validation of the URL (API10:2019 - Unsafe Consumption of APIs)
    try:
        response = requests.get(url)

Check failure

Code scanning / CodeQL

Full server-side request forgery (Critical)

The full URL of this request depends on a user-provided value.

Copilot Autofix

AI 7 months ago

To fix the SSRF vulnerability, we need to validate the user-provided URL to ensure it is safe to use. One way to do this is to restrict the URLs to a predefined list of allowed domains or to validate the URL structure and ensure it does not point to any internal or sensitive resources.

The best way to fix this problem without changing existing functionality is to:

  1. Parse the user-provided URL.
  2. Validate the URL to ensure it points to an allowed domain.
  3. Reject or sanitize any URLs that do not meet the validation criteria.

We will use the urllib.parse module to parse and validate the URL.

Suggested changeset 1
insecure-api/main-2.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main-2.py b/insecure-api/main-2.py
--- a/insecure-api/main-2.py
+++ b/insecure-api/main-2.py
@@ -203,3 +203,11 @@
     # Vulnerability: No validation of the URL (API10:2019 - Unsafe Consumption of APIs)
+    from urllib.parse import urlparse
+
+    allowed_domains = ["example.com", "another-allowed-domain.com"]
+
     try:
+        parsed_url = urlparse(url)
+        if parsed_url.netloc not in allowed_domains:
+            raise ValueError("URL domain is not allowed")
+
         response = requests.get(url)
EOF

Semgrep identified an issue in your code:

Untrusted input might be used to build an HTTP request, which can lead to a Server-side request forgery (SSRF) vulnerability. SSRF allows an attacker to send crafted requests from the server side to other internal or external systems. SSRF can lead to unauthorized access to sensitive data and, in some cases, allow the attacker to control applications or systems that trust the vulnerable service. To prevent this vulnerability, avoid allowing user input to craft the base request. Instead, treat it as part of the path or query parameter and encode it appropriately. When user input is necessary to prepare the HTTP request, perform strict input validation. Additionally, whenever possible, use allowlists to only interact with expected, trusted domains.

Dataflow graph: url (insecure-api/main-2.py, line 202, source) → url in requests.get(url) (line 205, sink).

To resolve this comment:

🔧 No guidance has been designated for this issue. Fix according to your organization's approved methods.
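Since no fix guidance is attached here, one possible sketch of the scheme-and-host allowlist validation this comment describes is shown below; the allowed hosts, timeout, and function name are illustrative assumptions, not part of the finding:

from urllib.parse import urlparse

import requests
from fastapi import HTTPException

ALLOWED_HOSTS = {"example.com", "api.example.com"}  # placeholder allowlist

def fetch_url_content_safe(url: str):
    parsed = urlparse(url)
    # Require HTTPS and an explicitly allowlisted host before issuing the request.
    if parsed.scheme != "https" or parsed.hostname not in ALLOWED_HOSTS:
        raise HTTPException(status_code=400, detail="URL not allowed")
    response = requests.get(url, timeout=5)
    return {"content": response.text}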

Alternatively, triage in Semgrep AppSec Platform to ignore the finding created by tainted-fastapi-http-request-requests.


Semgrep identified an issue in your code:

Data from request object is passed to a new server-side request. This could lead to a server-side request forgery (SSRF). To mitigate, ensure that schemes and hosts are validated against an allowlist, do not forward the response to the user, and ensure proper authentication and transport-layer security in the proxied request.

To resolve this comment:

🔧 No guidance has been designated for this issue. Fix according to your organization's approved methods.

Alternatively, triage in Semgrep AppSec Platform to ignore the finding created by ssrf-requests.

return {"content": response.text}
except Exception as e:
return {"error": str(e)}

Check warning

Code scanning / CodeQL

Information exposure through an exception (Medium)

Stack trace information flows to this location and may be exposed to an external user.

Copilot Autofix

AI 7 months ago

To fix the problem, we need to modify the exception handling in the fetch_url_content function to ensure that detailed error information is not exposed to the user. Instead, we will log the detailed error message on the server and return a generic error message to the user.

  • We will import the logging module to log the detailed error message.
  • We will modify the exception handling block to log the detailed error message and return a generic error message to the user.
Suggested changeset 1
insecure-api/main-2.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main-2.py b/insecure-api/main-2.py
--- a/insecure-api/main-2.py
+++ b/insecure-api/main-2.py
@@ -8,2 +8,5 @@
 from fastapi.responses import RedirectResponse
+import logging
+
+logging.basicConfig(level=logging.ERROR)
 
@@ -207,3 +210,4 @@
     except Exception as e:
-        return {"error": str(e)}
+        logging.error("Error fetching URL content: %s", str(e))
+        return {"error": "An internal error has occurred. Please try again later."}
 
EOF

# Additional vulnerable endpoint: Unvalidated Redirects and Forwards
@app.get("/redirect")
def unsafe_redirect(next: str):
    # Vulnerability: Unvalidated redirect (API10:2019 - Unsafe Consumption of APIs)
    return RedirectResponse(url=next)

Check warning

Code scanning / CodeQL

URL redirection from remote source (Medium)

Untrusted URL redirection depends on a user-provided value.

Copilot Autofix

AI 7 months ago

To fix the problem, we need to validate the next parameter before using it in the redirection. One way to do this is to maintain a list of allowed redirect URLs and check if the user-provided URL is in that list. If it is not possible to maintain such a list, we can use the urlparse function from the Python standard library to ensure that the URL does not contain an explicit host name, making it a relative path and thus safe to redirect.

We will implement the fix by:

  1. Importing the urlparse function from the urllib.parse module.
  2. Replacing backslashes with forward slashes in the next parameter.
  3. Checking that the next parameter does not contain an explicit host name and scheme using the urlparse function.
  4. Redirecting to the home page if the next parameter is not safe.
Suggested changeset 1
insecure-api/main-2.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main-2.py b/insecure-api/main-2.py
--- a/insecure-api/main-2.py
+++ b/insecure-api/main-2.py
@@ -8,2 +8,3 @@
 from fastapi.responses import RedirectResponse
+from urllib.parse import urlparse
 
@@ -213,2 +214,5 @@
     # Vulnerability: Unvalidated redirect (API10:2019 - Unsafe Consumption of APIs)
-    return RedirectResponse(url=next)
+    next = next.replace('\\', '/')
+    if not urlparse(next).netloc and not urlparse(next).scheme:
+        return RedirectResponse(url=next)
+    return RedirectResponse(url='/')
EOF

Semgrep identified an issue in your code:

The application builds a URL using user-controlled input, which can lead to an open redirect vulnerability. An attacker can manipulate the URL and redirect users to an arbitrary domain. Open redirect vulnerabilities can lead to issues such as cross-site scripting (XSS) or redirection to a malicious domain for activities such as phishing to capture users' credentials. To prevent this vulnerability, perform strict input validation of the domain against an allowlist of approved domains. Notify users in your application that they are leaving the website and display the domain they are being redirected to; they can then either accept or deny the redirect to an untrusted site.

Dataflow graph: next (insecure-api/main-2.py, line 212, source) → next in RedirectResponse(url=next) (line 214, sink).

To resolve this comment:

🔧 No guidance has been designated for this issue. Fix according to your organization's approved methods.
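Since no guidance is designated, a minimal sketch of the allowlist-based redirect validation this comment suggests could look like the following; the approved host list and function name are illustrative assumptions:

from urllib.parse import urlparse

from fastapi.responses import RedirectResponse

ALLOWED_REDIRECT_HOSTS = {"example.com"}  # placeholder allowlist

def safe_redirect(next: str):
    parsed = urlparse(next)
    # Allow relative paths (no scheme or host) and explicitly approved hosts;
    # anything else falls back to the site root.
    if (not parsed.netloc and not parsed.scheme) or parsed.hostname in ALLOWED_REDIRECT_HOSTS:
        return RedirectResponse(url=next)
    return RedirectResponse(url="/")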

Alternatively, triage in Semgrep AppSec Platform to ignore the finding created by tainted-redirect-fastapi.

13 changes: 12 additions & 1 deletion insecure-api/main.py
@@ -49,7 +49,18 @@

 # Public endpoint to get basic video game info
 @app.get("/games")
-def get_games():
+def get_games(query: str):
+    conn = sqlite3.connect('videogames.db')
+    cursor = conn.cursor()
+    try:
+        sql_query = f"SELECT * FROM tiles WHERE title = '{query}'"
+        cursor.execute(sql_query)

Check failure

Code scanning / CodeQL

SQL query built from user-controlled sources (High)

This SQL query depends on a user-provided value.

Copilot Autofix

AI 7 months ago

To fix the SQL injection vulnerability, we should use parameterized queries instead of directly embedding user input into the SQL query string. Parameterized queries ensure that user input is treated as data and not executable code, thus preventing SQL injection attacks.

In the provided code snippet, we need to modify the get_games function to use parameterized queries. Specifically, we will replace the string formatting in the SQL query with a parameterized query using placeholders.

Suggested changeset 1
insecure-api/main.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main.py b/insecure-api/main.py
--- a/insecure-api/main.py
+++ b/insecure-api/main.py
@@ -55,4 +55,4 @@
     try:
-        sql_query = f"SELECT * FROM tiles WHERE title = '{query}'"
-        cursor.execute(sql_query)
+        sql_query = "SELECT * FROM tiles WHERE title = ?"
+        cursor.execute(sql_query, (query,))
         video_games = cursor.fetchall()
EOF

Security control: Static Code Analysis Python Semgrep

SQLAlchemy raw SQL query concatenation risks SQL Injection

Avoiding SQL string concatenation: untrusted input concatenated with a raw SQL query can result in SQL injection. To execute a raw query safely, a prepared statement should be used. SQLAlchemy provides TextualSQL to easily use prepared statements with named parameters. For complex SQL composition, use the SQL Expression Language or Schema Definition Language. In most cases, the SQLAlchemy ORM will be a better option.

Severity: HIGH

Learn more about this issue



+        video_games = cursor.fetchall()
+    except Exception as e:
+        # Return the exception message for educational purposes (not recommended in production)
+        return {"error": str(e)}

Check warning

Code scanning / CodeQL

Information exposure through an exception (Medium)

Stack trace information flows to this location and may be exposed to an external user.

Copilot Autofix

AI 7 months ago

To fix the problem, we should avoid returning the exception message directly to the user. Instead, we should log the detailed error message on the server and return a generic error message to the user. This approach ensures that sensitive information is not exposed while still allowing developers to debug issues using the server logs.

  • Modify the exception handling in the get_games function to log the error message and return a generic error message to the user.
  • Add an appropriate logging mechanism to capture the detailed error message.
Suggested changeset 1
insecure-api/main.py

Autofix patch
Run the following command in your local git repository to apply this patch
cat << 'EOF' | git apply
diff --git a/insecure-api/main.py b/insecure-api/main.py
--- a/insecure-api/main.py
+++ b/insecure-api/main.py
@@ -59,4 +59,5 @@
     except Exception as e:
-        # Return the exception message for educational purposes (not recommended in production)
-        return {"error": str(e)}
+        # Log the exception message for debugging purposes
+        app.logger.error(f"An error occurred: {e}")
+        return {"error": "An internal error has occurred. Please try again later."}
     finally:
EOF
+    finally:
+        conn.close()
     return video_games

# Vulnerable endpoint: No authentication required to get sensitive sales data