Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
12 changes: 10 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@
This project provides a scalable API backend using FastAPI and PostgreSQL, featuring:

- Automatic full-text search on all text fields (via tsvector)
- Endpoints for health checks, product management, prompt handling, and prospect management
- Endpoints for health checks, product management, prompt handling (via `/prompt`), email sending (via the Resend API), and prospect management
- Efficient ingestion and processing of large CSV files

#### 🚀 Features
Expand All @@ -21,7 +21,7 @@ This project provides a scalable API backend using FastAPI and PostgreSQL, featu
- **Uvicorn** — Lightning-fast ASGI server
- **Pytest** — Comprehensive testing

#### Getting Started
#### Install & Use

### 1. Clone & Setup Environment

Expand All @@ -42,13 +42,21 @@ uvicorn app.main:app --reload

Visit [localhost:8000](http://localhost:8000) or [onrender](https://nx-ai.onrender.com)


#### API Documentation

FastAPI auto-generates interactive docs:

- [Swagger UI](https://nx-ai.onrender.com/docs)
- [ReDoc](https://nx-ai.onrender.com/redoc)

#### Notable Endpoints

- `GET /health` — Health check
- `GET/POST /prompt` — LLM prompt completion (formerly `/llm`)
- `GET/POST /resend` — Send email via Resend API (see implementation in `app/utils/notify/resend.py`)
- `GET /prospects` — Paginated prospects
- `POST /prospects/process` — Bulk CSV ingestion

## Full-Text Search (tsvector)

Expand Down
2 changes: 1 addition & 1 deletion app/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
"""Python - FastAPI, Postgres, tsvector"""

# Current Version
__version__ = "2.2.2"
__version__ = "2.2.3"
5 changes: 3 additions & 2 deletions app/api/prompt/__init__.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,4 @@
"""LLM Routes"""
"""Prompt Routes"""

from .prompt import router as llm_router
from .prompt import router as prompt_router
from .linkedin import router as linkedin_router
67 changes: 67 additions & 0 deletions app/api/prompt/linkedin.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
from fastapi import APIRouter, Depends, HTTPException

from app.utils.api_key_auth import get_api_key
from app.utils.db import get_db_connection_direct
from app.utils.make_meta import make_meta

router = APIRouter()


@router.post("/prompt/linkedin")
def linkedin_prompt_success(payload: dict, api_key: str = Depends(get_api_key)) -> dict:
    """POST /prompt/linkedin: return cached completion for linkedinUrl when available.

    Looks up the most recent row in the ``prompt`` table whose JSON ``data``
    carries the exact ``linkedinUrl``, or whose prompt text contains the URL.

    Args:
        payload: JSON request body; must contain a non-empty ``linkedinUrl`` string.
        api_key: Resolved by the ``get_api_key`` dependency (authentication only;
            not used in the body).

    Returns:
        dict with ``meta`` (status + message) and ``data``. On a cache hit,
        ``data`` carries the stored completion and record fields; otherwise
        ``cached`` is False and ``completion`` is None. DB failures are reported
        in ``meta`` rather than raised (best-effort lookup).

    Raises:
        HTTPException: 400 when ``linkedinUrl`` is missing or blank.
    """
    linkedin_url = (payload.get("linkedinUrl") or "").strip()
    if not linkedin_url:
        raise HTTPException(status_code=400, detail="Missing 'linkedinUrl' in request body.")

    # Escape LIKE metacharacters so a URL containing '%', '_' or '\' cannot
    # broaden the ILIKE match and return an unrelated cached record.
    # (Backslash is PostgreSQL's default LIKE escape character.)
    escaped = (
        linkedin_url.replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
    )
    like_pattern = f"%{escaped}%"

    conn = None
    cur = None
    try:
        conn = get_db_connection_direct()
        cur = conn.cursor()
        cur.execute(
            """
            SELECT id, completion, time, model, data
            FROM prompt
            WHERE (data->>'linkedinUrl' = %s OR prompt ILIKE %s)
            ORDER BY id DESC
            LIMIT 1;
            """,
            (linkedin_url, like_pattern),
        )
        row = cur.fetchone()

        if row:
            return {
                "meta": make_meta("success", "LinkedIn URL already analysed"),
                "data": {
                    "cached": True,
                    "id": row[0],
                    "linkedinUrl": linkedin_url,
                    "completion": row[1],
                    # 'time' column is a datetime; serialize to ISO-8601 for JSON.
                    "time": row[2].isoformat() if row[2] else None,
                    "model": row[3],
                    "record_data": row[4],
                },
            }

        return {
            "meta": make_meta("warning", "LinkedIn URL not analysed yet"),
            "data": {
                "cached": False,
                "linkedinUrl": linkedin_url,
                "completion": None,
            },
        }
    except HTTPException:
        # Re-raise our own 400 untouched; only DB/driver errors are softened below.
        raise
    except Exception as e:
        # Best-effort lookup: report DB failure in meta instead of a 500,
        # matching the response-envelope style used elsewhere in this API.
        return {
            "meta": make_meta("error", f"DB error: {str(e)}"),
            "data": {},
        }
    finally:
        # Always release cursor/connection, including on the raise paths.
        if cur:
            cur.close()
        if conn:
            conn.close()
25 changes: 11 additions & 14 deletions app/api/prompt/prompt.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,8 +21,8 @@ def get_prompt_records(
if prospect_id is not None:
# No pagination for single prospect_id lookup
select_query = """
SELECT id, prompt, completion, duration, time, data, model, prospect_id, search_vector
FROM llm
SELECT id, prompt, completion, duration, time, data, model, prospect_id
FROM prompt
WHERE prospect_id = %s
ORDER BY id DESC
"""
Expand All @@ -38,7 +38,6 @@ def get_prompt_records(
"data": row[5],
"model": row[6],
"prospect_id": row[7],
"search_vector": str(row[8]) if row[8] is not None else None,
}
for row in rows
]
Expand All @@ -58,12 +57,12 @@ def get_prompt_records(
}
else:
offset = (page - 1) * page_size
cur.execute("SELECT COUNT(*) FROM llm;")
cur.execute("SELECT COUNT(*) FROM prompt;")
count_row = cur.fetchone()
total = count_row[0] if count_row and count_row[0] is not None else 0
cur.execute("""
SELECT id, prompt, completion, duration, time, data, model, prospect_id, search_vector
FROM llm
SELECT id, prompt, completion, duration, time, data, model, prospect_id
FROM prompt
ORDER BY id DESC
LIMIT %s OFFSET %s;
""", (page_size, offset))
Expand All @@ -77,13 +76,12 @@ def get_prompt_records(
"data": row[5],
"model": row[6],
"prospect_id": row[7],
"search_vector": str(row[8]) if row[8] is not None else None,
}
for row in cur.fetchall()
]
cur.close()
conn.close()
meta = make_meta("success", f"LLM {len(records)} records (page {page})")
meta = make_meta("success", f"Prompt {len(records)} records (page {page})")
return {
"meta": meta,
"data": {
Expand Down Expand Up @@ -140,22 +138,21 @@ def llm_post(payload: dict) -> dict:
if not completion:
error_details = " | ".join([f"{k}: {v}" for k, v in errors.items()])
raise Exception(f"No available Gemini model succeeded for generate_content with your API key. Details: {error_details}")
# Insert record into llm table
# Insert record into prompt table
record_id = None
try:
import json
from app import __version__
data_blob = json.dumps({"version": __version__})
conn = get_db_connection_direct()
cur = conn.cursor()
# Generate tsvector from prompt and completion
cur.execute(
"""
INSERT INTO llm (prompt, completion, duration, data, model, prospect_id, search_vector)
VALUES (%s, %s, %s, %s, %s, %s, to_tsvector('english', %s || ' ' || %s))
INSERT INTO prompt (prompt, completion, duration, data, model, prospect_id)
VALUES (%s, %s, %s, %s, %s, %s)
RETURNING id;
""",
(prompt, completion, duration, data_blob, used_model, prospect_id, prompt, completion)
(prompt, completion, duration, data_blob, used_model, prospect_id)
)
record_id_row = cur.fetchone()
record_id = record_id_row[0] if record_id_row else None
Expand All @@ -164,7 +161,7 @@ def llm_post(payload: dict) -> dict:
conn.close()
except Exception as db_exc:
# Log DB error but do not fail the API response
logging.error(f"Failed to insert llm record: {db_exc}")
logging.error(f"Failed to insert prompt record: {db_exc}")
meta = make_meta("success", f"Gemini completion received from {used_model}")
return {"meta": meta, "data": {"id": record_id, "prompt": prompt, "completion": completion}}
except Exception as e:
Expand Down
2 changes: 0 additions & 2 deletions app/api/prompt/sql/alter_add_prompt_code.sql

This file was deleted.

2 changes: 0 additions & 2 deletions app/api/prompt/sql/alter_add_prospect_id.sql

This file was deleted.

5 changes: 0 additions & 5 deletions app/api/prompt/sql/alter_add_search_vector.sql

This file was deleted.

2 changes: 0 additions & 2 deletions app/api/prompt/sql/alter_add_type_column.sql

This file was deleted.

2 changes: 1 addition & 1 deletion app/api/prompt/sql/create_table.sql
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@

CREATE TABLE IF NOT EXISTS llm (
CREATE TABLE IF NOT EXISTS prompt (
id SERIAL PRIMARY KEY,
vector vector(1536),
prompt TEXT NOT NULL,
Expand Down
1 change: 1 addition & 0 deletions app/api/prompt/sql/drop_llm_table.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
DROP TABLE IF EXISTS llm;
14 changes: 0 additions & 14 deletions app/api/prompt/sql/empty_llm_table.py

This file was deleted.

16 changes: 0 additions & 16 deletions app/api/prompt/sql/run_alter_add_prompt_code.py

This file was deleted.

16 changes: 0 additions & 16 deletions app/api/prompt/sql/run_alter_add_prospect_id.py

This file was deleted.

17 changes: 0 additions & 17 deletions app/api/prompt/sql/run_alter_add_search_vector.py

This file was deleted.

21 changes: 0 additions & 21 deletions app/api/prompt/sql/run_alter_add_type_column.py

This file was deleted.

23 changes: 11 additions & 12 deletions app/api/prospects/prospects.py
Original file line number Diff line number Diff line change
Expand Up @@ -98,28 +98,27 @@ def prospects_read_one(id: int = Path(..., description="ID of the prospect to re
if row is not None:
columns = [desc[0] for desc in cur.description]
data = dict(zip(columns, row))
# Fetch related llm records
# Fetch related prompt records
try:
from app.utils.db import get_db_connection_direct
llm_conn = get_db_connection_direct()
llm_cur = llm_conn.cursor()
llm_cur.execute("SELECT id, duration, time, data, model, search_vector FROM llm WHERE prospect_id = %s ORDER BY id DESC;", (id,))
llm_records = [
prompt_conn = get_db_connection_direct()
prompt_cur = prompt_conn.cursor()
prompt_cur.execute("SELECT id, duration, time, data, model FROM prompt WHERE prospect_id = %s ORDER BY id DESC;", (id,))
prompt_records = [
{
"id": r[0],
"duration": r[1],
"time": r[2].isoformat() if r[2] else None,
"data": r[3],
"model": r[4],
"search_vector": str(r[5]) if r[5] is not None else None,
}
for r in llm_cur.fetchall()
for r in prompt_cur.fetchall()
]
llm_cur.close()
llm_conn.close()
data["llm_records"] = llm_records
except Exception as llm_exc:
data["llm_records"] = []
prompt_cur.close()
prompt_conn.close()
data["prompt_records"] = prompt_records
except Exception as prompt_exc:
data["prompt_records"] = []
else:
data = None
meta = make_meta("error", f"No prospect found with id {id}")
Expand Down
14 changes: 8 additions & 6 deletions app/api/root.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,23 +22,25 @@ def root() -> dict:
endpoints = [
{"name": "health", "url": f"{base_url}/health"},
{
"name": "Orders°",
"name": "Prompt°",
"endpoints": [
{"name": "list", "url": f"{base_url}/orders"},
{"name": "list", "url": f"{base_url}/prompt"},
{"name": "linkedin", "url": f"{base_url}/prompt/linkedin"},
]
},
{
"name": "Prospects°",
"name": "Orders°",
"endpoints": [
{"name": "list", "url": f"{base_url}/prospects"},
{"name": "list", "url": f"{base_url}/orders"},
]
},
{
"name": "Prompt°",
"name": "Prospects°",
"endpoints": [
{"name": "list", "url": f"{base_url}/prompt"},
{"name": "list", "url": f"{base_url}/prospects"},
]
},

{"name": "Docs", "url": f"{base_url}/docs"},
]
return {"meta": meta, "data": endpoints}
Loading
Loading