Itsjustamit committed
Commit cd40a43 · verified · Parent: cd35cc5

some updates

services/__pycache__/__init__.cpython-311.pyc ADDED (binary, 647 Bytes)
services/__pycache__/anthropic_client.cpython-311.pyc ADDED (binary, 8.49 kB)
services/__pycache__/elevenlabs_client.cpython-311.pyc ADDED (binary, 5.94 kB)
services/__pycache__/gemini_client.cpython-311.pyc ADDED (binary, 15.2 kB)
services/__pycache__/modal_flux.cpython-311.pyc ADDED (binary, 8.71 kB)
services/__pycache__/openai_client.cpython-311.pyc ADDED (binary, 5.17 kB)
services/__pycache__/sambanova_client.cpython-311.pyc ADDED (binary, 9.99 kB)
services/anthropic_client.py CHANGED
@@ -15,15 +15,35 @@ class AnthropicClient:
     def __init__(self, api_key: str = None):
         """Initialize with optional custom API key."""
         self.api_key = api_key or os.getenv("ANTHROPIC_API_KEY")
-        self.client = anthropic.Anthropic(api_key=self.api_key)
-        self.async_client = anthropic.AsyncAnthropic(api_key=self.api_key)
+        self.available = bool(self.api_key)
+
+        if self.available:
+            self.client = anthropic.Anthropic(api_key=self.api_key)
+            self.async_client = anthropic.AsyncAnthropic(api_key=self.api_key)
+        else:
+            self.client = None
+            self.async_client = None
+            print("⚠️ Anthropic: No API key found - service disabled")
+
         self.model = "claude-sonnet-4-20250514"
 
+    def is_available(self) -> bool:
+        """Check if the client is available."""
+        return self.available
+
     async def analyze_emotion(self, user_input: str, system_prompt: str) -> dict:
         """
         Analyze user's emotional state with nuance.
         Returns structured emotion data.
         """
+        if not self.available or not self.async_client:
+            return {
+                "primary_emotions": ["neutral"],
+                "intensity": 5,
+                "pip_expression": "neutral",
+                "intervention_needed": False
+            }
+
         response = await self.async_client.messages.create(
             model=self.model,
             max_tokens=1024,
@@ -58,6 +78,13 @@
         """
         Decide what action Pip should take based on emotional state.
         """
+        if not self.available or not self.async_client:
+            return {
+                "action": "reflect",
+                "image_style": "gentle",
+                "voice_tone": "warm"
+            }
+
         response = await self.async_client.messages.create(
             model=self.model,
             max_tokens=512,
@@ -93,6 +120,10 @@
         """
         Generate Pip's conversational response with streaming.
         """
+        if not self.available or not self.async_client:
+            yield "I'm here with you. Let me think about what you shared..."
+            return
+
         messages = conversation_history or []
 
         # Add context about current emotional state
@@ -126,6 +157,10 @@ Voice tone: {action.get('voice_tone', 'warm')}
         """
         Generate a gentle intervention response for concerning emotional states.
         """
+        if not self.available or not self.async_client:
+            yield "I hear you, and I want you to know that what you're feeling matters. Take a moment to breathe..."
+            return
+
         context = f"""
 [INTERVENTION NEEDED]
 User message: {user_input}
@@ -151,6 +186,9 @@ Do NOT be preachy or clinical.
         Generate text response for a given prompt.
         Used for summaries and other text generation needs.
         """
+        if not self.available or not self.async_client:
+            return ""
+
         try:
             response = await self.async_client.messages.create(
                 model=self.model,
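
Since each method above now degrades internally, callers can also branch on the new flag up front. A minimal usage sketch (illustrative only, not part of this commit; the system prompt is a placeholder):

    import asyncio
    from services.anthropic_client import AnthropicClient

    async def main() -> None:
        client = AnthropicClient()  # falls back to ANTHROPIC_API_KEY from the env
        if not client.is_available():
            print("Claude disabled: expect neutral defaults from analyze_emotion()")
        result = await client.analyze_emotion(
            user_input="I had a rough day",
            system_prompt="Analyze the user's emotional state.",  # placeholder
        )
        print(result["pip_expression"])  # "neutral" whenever no key is configured

    asyncio.run(main())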
services/elevenlabs_client.py CHANGED
@@ -54,13 +54,23 @@ class ElevenLabsClient:
     }
 
     def __init__(self):
-        self.client = AsyncElevenLabs(
-            api_key=os.getenv("ELEVENLABS_API_KEY")
-        )
+        api_key = os.getenv("ELEVENLABS_API_KEY")
+        self.available = bool(api_key)
+
+        if self.available:
+            self.client = AsyncElevenLabs(api_key=api_key)
+        else:
+            self.client = None
+            print("⚠️ ElevenLabs: No API key found - voice disabled")
+
         # Default voice - can be customized or created via Voice Design
         self.default_voice_id = "21m00Tcm4TlvDq8ikWAM"  # Rachel - warm, friendly
         self.pip_voice_id = None  # Will be set if custom voice is created
 
+    def is_available(self) -> bool:
+        """Check if the client is available."""
+        return self.available
+
     async def speak(
         self,
         text: str,
@@ -71,6 +81,9 @@
         Generate speech from text with emotional tone matching.
         Returns audio bytes (mp3).
         """
+        if not self.available or not self.client:
+            return None
+
         try:
             model = self.MODELS["flash"] if use_fast_model else self.MODELS["expressive"]
             voice_settings = self.TONE_SETTINGS.get(tone, self.TONE_SETTINGS["warm"])
@@ -106,6 +119,9 @@
         Stream audio generation for lower latency.
         Yields audio chunks as they're generated.
         """
+        if not self.available or not self.client:
+            return
+
         try:
             model = self.MODELS["flash"]
             voice_settings = self.TONE_SETTINGS.get(tone, self.TONE_SETTINGS["warm"])
@@ -130,6 +146,9 @@
         """
         Get list of available voices.
         """
+        if not self.available or not self.client:
+            return []
+
         try:
             voices = await self.client.voices.get_all()
             return [{"id": v.voice_id, "name": v.name} for v in voices.voices]
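
Note the caller-side contract change: speak() now returns None instead of raising when the key is missing. A hedged guard sketch (illustrative; it assumes speak()'s remaining parameters, such as tone, have defaults):

    from services.elevenlabs_client import ElevenLabsClient

    async def synthesize(text: str) -> bytes | None:
        tts = ElevenLabsClient()
        audio = await tts.speak(text)  # mp3 bytes, or None when voice is disabled
        if audio is None:
            return None  # degrade to a text-only experience instead of crashing
        return audio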
services/gemini_client.py CHANGED
@@ -27,14 +27,22 @@ class GeminiClient:
     def __init__(self, api_key: str = None):
         """Initialize with optional custom API key."""
         self.api_key = api_key or os.getenv("GOOGLE_API_KEY")
-        if self.api_key:
+        self.available = bool(self.api_key)
+
+        if self.available:
             genai.configure(api_key=self.api_key)
+        else:
+            print("⚠️ Gemini: No API key found - service disabled")
 
         # Model instances (lazy loaded)
         self._fast_model = None
         self._pro_model = None
         self._image_model = None
 
+    def is_available(self) -> bool:
+        """Check if the client is available."""
+        return self.available
+
     def _get_fast_model(self):
         """Get fast model for quick responses."""
         if self._fast_model is None:
@@ -62,6 +70,14 @@
         Analyze emotional content of user input.
         Returns structured emotion analysis.
         """
+        if not self.available:
+            return {
+                "primary_emotions": ["neutral"],
+                "intensity": 5,
+                "pip_expression": "neutral",
+                "intervention_needed": False
+            }
+
         try:
             model = self._get_pro_model()
 
@@ -149,6 +165,9 @@ Respond with ONLY valid JSON, no markdown."""
         Generate a quick acknowledgment (< 500ms target).
         Uses the fastest available model.
         """
+        if not self.available:
+            return "I hear you..."
+
         try:
             model = self._get_fast_model()
 
@@ -269,6 +288,9 @@ Respond with care, warmth, and appropriate resources if needed."""
         """
         Generate text (for prompts, summaries, etc).
         """
+        if not self.available:
+            return None
+
         try:
             model = self._get_pro_model()
             response = await model.generate_content_async(
@@ -323,6 +345,9 @@ Generate the enhanced image prompt only, no explanation."""
         Generate an image using Gemini's image generation model.
         Returns base64 encoded image.
         """
+        if not self.available:
+            return None
+
         try:
             model = self._get_image_model()
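
The same `if not self.available` guard now opens every public method in this file. A possible follow-up refactor (purely illustrative; the commit does not do this) would centralize the check in a decorator:

    import functools

    def fallback_when_unavailable(default):
        """Return `default` instead of calling the API when the client is disabled."""
        def wrap(fn):
            @functools.wraps(fn)
            async def inner(self, *args, **kwargs):
                if not getattr(self, "available", False):
                    return default
                return await fn(self, *args, **kwargs)
            return inner
        return wrap

    # Hypothetical application to a client method:
    #     @fallback_when_unavailable(default=None)
    #     async def generate_text(self, prompt): ...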
services/modal_flux.py CHANGED
@@ -34,6 +34,14 @@ class ModalFluxClient:
     def __init__(self):
         self.hf_token = os.getenv("HF_TOKEN")
         self.modal_endpoint = os.getenv("MODAL_FLUX_ENDPOINT")  # If deployed
+        self.available = bool(self.hf_token) or bool(self.modal_endpoint)
+
+        if not self.available:
+            print("⚠️ HuggingFace/Modal: No tokens found - image generation limited")
+
+    def is_available(self) -> bool:
+        """Check if the client is available."""
+        return self.available
 
     async def generate_image(
         self,
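
Availability here is an OR over two settings: either an HF token or a deployed Modal endpoint enables the client. A quick check with placeholder values (not real credentials):

    import os

    # Either setting is sufficient; both values below are placeholders.
    os.environ.setdefault("HF_TOKEN", "hf_xxx")
    # os.environ["MODAL_FLUX_ENDPOINT"] = "https://example.modal.run"

    from services.modal_flux import ModalFluxClient

    client = ModalFluxClient()
    assert client.is_available()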
services/openai_client.py CHANGED
@@ -23,6 +23,9 @@ class OpenAIClient:
         Generate an image using GPT-4o / DALL-E 3.
         Returns base64 encoded image or URL.
         """
+        if not self.available or not self.client:
+            return None
+
         try:
             response = await self.client.images.generate(
                 model="dall-e-3",
@@ -42,6 +45,9 @@
         """
         Transcribe audio using Whisper.
         """
+        if not self.available or not self.client:
+            return ""
+
         try:
             with open(audio_file_path, "rb") as audio_file:
                 response = await self.client.audio.transcriptions.create(
@@ -58,6 +64,9 @@
         """
         Transcribe audio from bytes using Whisper.
         """
+        if not self.available or not self.client:
+            return ""
+
         try:
             # Create a file-like object from bytes
             response = await self.client.audio.transcriptions.create(
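
Both transcription paths now degrade to an empty string, so downstream code can treat "" uniformly as "no speech input". A sketch of that (the method name transcribe_audio is assumed here; the hunks above only show docstrings):

    from services.openai_client import OpenAIClient

    async def transcribe_or_empty(audio_file_path: str) -> str:
        stt = OpenAIClient()
        # "" is returned both when Whisper errors and when no OPENAI_API_KEY is
        # set, so callers only need a single empty-string check.
        text = await stt.transcribe_audio(audio_file_path)  # assumed method name
        if not text:
            print("No transcription available; falling back to typed input")
        return text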
services/sambanova_client.py CHANGED
@@ -14,10 +14,18 @@ class SambanovaClient:
     """SambaNova-powered fast inference for Pip."""
 
     def __init__(self):
-        self.client = AsyncOpenAI(
-            api_key=os.getenv("SAMBANOVA_API_KEY"),
-            base_url=os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")
-        )
+        api_key = os.getenv("SAMBANOVA_API_KEY")
+        self.available = bool(api_key)
+
+        if self.available:
+            self.client = AsyncOpenAI(
+                api_key=api_key,
+                base_url=os.getenv("SAMBANOVA_BASE_URL", "https://api.sambanova.ai/v1")
+            )
+        else:
+            self.client = None
+            print("⚠️ SambaNova: No API key found - service disabled")
+
         # Using Llama 3.1 or DeepSeek on SambaNova
         self.model = "Meta-Llama-3.1-8B-Instruct"
         self._rate_limited = False
@@ -43,7 +51,9 @@
         Generate a quick acknowledgment while heavier processing happens.
         This should be FAST - just a brief "I hear you" type response.
         """
-        # If rate limited, return a fallback
+        # If not available or rate limited, return a fallback
+        if not self.available or not self.client:
+            return "I hear you..."
         if await self._check_rate_limit():
             return "I hear you..."
 
@@ -75,10 +85,14 @@
         Transform user context into a detailed, vivid image prompt.
         This is where user-specific imagery is crafted.
         """
-        # If rate limited, return a simple prompt
+        emotions = emotion_state.get('primary_emotions', ['peaceful'])
+        fallback = f"A beautiful, calming scene representing {emotions[0] if emotions else 'peace'}, soft colors, dreamy atmosphere"
+
+        # If not available or rate limited, return a simple prompt
+        if not self.available or not self.client:
+            return fallback
         if await self._check_rate_limit():
-            emotions = emotion_state.get('primary_emotions', ['peaceful'])
-            return f"A beautiful, calming scene representing {emotions[0] if emotions else 'peace'}, soft colors, dreamy atmosphere"
+            return fallback
 
         context = f"""
 User said: "{user_input}"
@@ -119,7 +133,10 @@ Generate a vivid, specific image prompt based on THIS user's context.
         Generate conversational response with streaming.
         Used for load-balanced conversation when Claude is busy.
         """
-        # If rate limited, yield a fallback
+        # If not available or rate limited, yield a fallback
+        if not self.available or not self.client:
+            yield "I understand how you're feeling. Let me take a moment to think about this..."
+            return
         if await self._check_rate_limit():
             yield "I understand how you're feeling. Let me take a moment to think about this..."
             return
@@ -159,14 +176,18 @@ User said: {user_input}
         """
         import json
 
-        # If rate limited, return basic analysis
+        default_response = {
+            "primary_emotions": ["neutral"],
+            "intensity": 5,
+            "pip_expression": "neutral",
+            "intervention_needed": False
+        }
+
+        # If not available or rate limited, return basic analysis
+        if not self.available or not self.client:
+            return default_response
         if await self._check_rate_limit():
-            return {
-                "primary_emotions": ["neutral"],
-                "intensity": 5,
-                "pip_expression": "neutral",
-                "intervention_needed": False
-            }
+            return default_response
 
         try:
             response = await self.client.chat.completions.create(
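
A quick sanity check of the new no-key behavior (an illustrative test sketch; it only touches attributes visible in the diff above):

    import os

    os.environ.pop("SAMBANOVA_API_KEY", None)  # simulate a missing key

    from services.sambanova_client import SambanovaClient

    client = SambanovaClient()  # prints the "service disabled" warning
    assert not client.is_available()
    assert client.client is None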