# Ragfist AI Ultra Full Final Master Code
# By Piyush Singh 💠
import os
import json
import time
import threading
import cv2
import face_recognition
import pyttsx3
import speech_recognition as sr
import openai
import pkg_resources
from cryptography.fernet import Fernet
from kivy.app import App
from kivy.uix.boxlayout import BoxLayout
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.core.window import Window
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse
import subprocess
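# Assumed third-party dependencies (pip package names): opencv-python, face_recognition,
# pyttsx3, SpeechRecognition (plus PyAudio for microphone input), openai (the legacy <1.0
# interface is used below), cryptography and kivy.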
# GPT-4o API key (replace with your key)
openai.api_key = "sk-proj-Your-API-Key"
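# A safer alternative (sketch): load the key from an environment variable instead of
# hardcoding it, e.g. openai.api_key = os.environ.get("OPENAI_API_KEY", "")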
Window.clearcolor = (0.05, 0.05, 0.1, 1)
class OrbAvatar(Widget):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
self.intensity = 0.4
self.emotion = 'neutral'
Clock.schedule_interval(self.animate_orb, 0.05)
def set_emotion(self, emotion):
self.emotion = emotion
def animate_orb(self, dt):
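        # Redraws the orb every 0.05 s (scheduled in __init__): the glow colour follows the
        # current emotion and the alpha ramps from ~0.3 up to 1.0 before snapping back,
        # which produces a continuous pulsing effect.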
self.canvas.clear()
colors = {
'happy': (0.3, 1, 0.5),
'angry': (1, 0.2, 0.2),
'sad': (0.2, 0.4, 1),
'neutral': (0.2, 0.7, 1)
}
glow = colors.get(self.emotion, (0.2, 0.7, 1))
self.intensity += 0.02
        if self.intensity > 1: self.intensity = 0.3
with self.canvas:
            Color(*glow, self.intensity)
            size = min(self.width, self.height) * 0.6
Ellipse(pos=(self.center_x - size/2, self.center_y - size/2), size=(size, size))
class RagfistCore(BoxLayout):
    def __init__(self, **kwargs):
        super().__init__(orientation='vertical', padding=20, spacing=10, **kwargs)
self.engine = pyttsx3.init()
self.recognizer = sr.Recognizer()
        self.label = Label(text="🎤 Ragfist AI: Voice Activation Ready", font_size=24, color=(1,1,1,1), size_hint=(1, 0.15))
self.orb = OrbAvatar(size_hint=(1, 0.4))
self.add_widget(self.label)
self.add_widget(self.orb)
self.owner_name = "Piyush"
self.owner_image = "owner.jpg"
self.memory_file = "memory.json"
self.key_file = "key.key"
self.user_file = "users.json"
self.permissions = {}
self.active_user = None
        self.features = {
            "ब्राउज़र खोलो": self.open_browser,        # "open browser"
            "ब्लूटूथ ऑन": self.bluetooth_on,           # "bluetooth on"
            "ब्लूटूथ बंद": self.bluetooth_off,          # "bluetooth off"
            "व्हाट्सएप खोलो": self.open_whatsapp,      # "open WhatsApp"
            "यूट्यूब खोलो": self.open_youtube,         # "open YouTube"
            "गैलरी खोलो": self.open_gallery,           # "open gallery"
            "फाइल्स खोलो": self.open_files,            # "open files"
            "संगीत चलाओ": self.open_music,             # "play music"
            "कॉन्टैक्ट्स खोलो": self.open_contacts,    # "open contacts"
            "कॉल लॉग खोलो": self.open_call_logs,       # "open call log"
            "एसएमएस खोलो": self.open_sms,              # "open SMS"
            "कैलेंडर खोलो": self.open_calendar,        # "open calendar"
            "स्मृति बताओ": self.show_memory,           # "tell me my memory"
            "याद दिलाओ": self.set_reminder,            # "remind me"
            "टाइमर सेट करो": self.set_timer,           # "set a timer"
            "ऐप्स दिखाओ": self.list_installed_apps,    # "show apps"
            "फीचर्स बताओ": self.list_features          # "list features"
        }
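        # Each key above is a Hindi trigger phrase; handle_command() does a simple substring
        # match of these keys against the recognised command and calls the mapped method if
        # the active user has permission for it.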
self.load_keys_and_memory()
self.face_verify()
Clock.schedule_once(lambda dt: self.start_voice_loop(), 2)
def speak(self, text):
        self.label.text = f"🧠 {text}"
self.engine.say(text)
self.engine.runAndWait()
def detect_emotion(self, text):
        # Hindi mood cues: happy ("khush", "maza", "badhiya"), angry ("gussa", "naraaz"), tired/sad ("thaka", "neend")
        if any(w in text for w in ["खुश", "मजा", "बढ़िया"]): return 'happy'
        if any(w in text for w in ["गुस्सा", "नाराज़"]): return 'angry'
        if any(w in text for w in ["थका", "नींद"]): return 'sad'
return 'neutral'
def load_keys_and_memory(self):
if not os.path.exists(self.key_file):
self.key = Fernet.generate_key()
with open(self.key_file, "wb") as f: f.write(self.key)
else:
with open(self.key_file, "rb") as f: self.key = f.read()
self.fernet = Fernet(self.key)
if os.path.exists(self.memory_file):
try:
with open(self.memory_file, "rb") as f:
self.memory = json.loads(self.fernet.decrypt(f.read()).decode())
            except Exception: self.memory = {}
else: self.memory = {}
if os.path.exists(self.user_file):
try:
with open(self.user_file, "rb") as f:
self.users = json.loads(self.fernet.decrypt(f.read()).decode())
            except Exception: self.users = {}
else: self.users = {}
def save_memory(self):
with open(self.memory_file, "wb") as f:
f.write(self.fernet.encrypt(json.dumps(self.memory).encode()))
def save_users(self):
with open(self.user_file, "wb") as f:
f.write(self.fernet.encrypt(json.dumps(self.users).encode()))
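    # memory.json and users.json are stored as Fernet-encrypted blobs. A minimal round-trip
    # sketch of what save/load do:
    #   f = Fernet(key)
    #   token = f.encrypt(json.dumps(data).encode())    # bytes written to disk
    #   data = json.loads(f.decrypt(token).decode())    # recovered on load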
def face_verify(self):
self.speak("рдкрд╣рдЪрд╛рди рд╕рддреНрдпрд╛рдкрд┐рдд рдХреА рдЬрд╛ рд░рд╣реА рд╣реИ...")
video = cv2.VideoCapture(0)
ret, frame = video.read()
video.release()
if not ret:
self.speak("рдХреИрдорд░рд╛ рдирд╣реАрдВ рдЪрд▓рд╛")
exit()
try:
unknown_encoding = face_recognition.face_encodings(frame)\[0\]
for name, image_file in self.users.items():
if not os.path.exists(image_file):
continue
known_image = face_recognition.load_image_file(image_file)
known_encoding = face_recognition.face_encodings(known_image)\[0\]
match = face_recognition.compare_faces(\[known_encoding\], unknown_encoding)
if match\[0\]:
self.active_user = name
self.speak(f"рд╕реНрд╡рд╛рдЧрдд рд╣реИ {name}!")
if name == self.owner_name:
self.permissions\["full_access"\] = True
else:
self.permissions\["full_access"\] = False
return
self.speak("рдкрд╣рдЪрд╛рди рд╡рд┐рдлрд▓")
exit()
except:
self.speak("рдЪреЗрд╣рд░рд╛ рдкрд╣рдЪрд╛рдирдиреЗ рдореЗрдВ рд╡рд┐рдлрд▓")
exit()
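    # Note: face_recognition.face_encodings() returns an empty list when no face is
    # detected, so indexing [0] raises IndexError, which is what the except branch above
    # catches. compare_faces() uses a default tolerance of 0.6; pass tolerance=... to
    # tighten or loosen matching.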
def start_voice_loop(self):
threading.Thread(target=self.listen_loop, daemon=True).start()
def listen_loop(self):
while True:
with sr.Microphone() as source:
try:
                    self.label.text = "🎙 सुन रहा हूँ..."  # "Listening..."
                    audio = self.recognizer.listen(source, timeout=5)
                    command = self.recognizer.recognize_google(audio, language="hi-IN").lower()
                    self.label.text = f"📥 {command}"
                    emo = self.detect_emotion(command)
                    self.orb.set_emotion(emo)
                    if "सवाल" in command or "gpt" in command:  # "question"
                        self.ask_gpt(command)
                    elif "नई पहचान जोड़ो" in command and self.permissions.get("full_access"):  # "add a new identity"
                        self.add_new_user()
                    elif "अनुमति दिखाओ" in command and self.permissions.get("full_access"):  # "show permissions"
                        self.list_permissions()
                    elif "अनुमति चालू करो" in command and self.permissions.get("full_access"):  # "turn a permission on"
                        self.toggle_permission(command, True)
                    elif "अनुमति बंद करो" in command and self.permissions.get("full_access"):  # "turn a permission off"
                        self.toggle_permission(command, False)
                    else:
                        self.handle_command(command)
                except Exception:
                    self.label.text = "⚠️ सुन नहीं पाया"  # "Couldn't hear anything"
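    # The broad except above covers the usual SpeechRecognition failures:
    # sr.WaitTimeoutError (no speech within the 5 s timeout), sr.UnknownValueError
    # (speech not understood) and sr.RequestError (recognition service unreachable).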
def handle_command(self, cmd):
for key in self.features:
if key in cmd:
if self.permissions.get("full_access") or self.permissions.get(key, False):
                    self.features[key]()
                else:
                    self.speak("माफ़ कीजिए, आपके पास इस फीचर की अनुमति नहीं है।")  # "Sorry, you don't have permission for this feature."
                return
        if "अलविदा" in cmd or "बंद करो" in cmd:  # "goodbye" / "shut down"
            self.speak("अलविदा सर")  # "Goodbye, sir"
            self.save_memory()
            self.save_users()
            App.get_running_app().stop()
        else:
            self.speak("यह कमांड ज्ञात नहीं है")  # "That command is not recognised"
def ask_gpt(self, prompt):
try:
            self.label.text = "🤖 GPT सोच रहा है..."  # "GPT is thinking..."
            history = self.memory.get("history", [])[-3:]
            history.append({"role": "user", "content": prompt})
            res = openai.ChatCompletion.create(model="gpt-4o", messages=history)
            reply = res.choices[0].message.content.strip()
            self.memory.setdefault("history", []).append({"role": "assistant", "content": reply})
            self.save_memory()
            self.speak(reply)
            emo = self.detect_emotion(reply)
            self.orb.set_emotion(emo)
        except Exception:
            self.speak("GPT से जवाब नहीं मिल पाया")  # "Couldn't get a reply from GPT"
def add_new_user(self):
self.speak("рдХреГрдкрдпрд╛ рдирдпреЗ рд╡реНрдпрдХреНрддрд┐ рдХрд╛ рдирд╛рдо рдмрддрд╛рдЗрдпреЗред")
with sr.Microphone() as source:
audio = self.recognizer.listen(source, timeout=5)
try:
name = self.recognizer.recognize_google(audio, language="hi-IN").capitalize()
self.speak(f"{name} рдХреЗ рдЪреЗрд╣рд░реЗ рдХреА рддрд╕реНрд╡реАрд░ рд▓реЗ рд░рд╣рд╛ рд╣реВрдБред")
video = cv2.VideoCapture(0)
ret, frame = video.read()
video.release()
if ret:
filename = f"{name}.jpg"
cv2.imwrite(filename, frame)
self.users\[name\] = filename
self.save_users()
self.speak(f"{name} рд╕рдлрд▓рддрд╛рдкреВрд░реНрд╡рдХ рдЬреЛрдбрд╝рд╛ рдЧрдпрд╛ред")
else:
self.speak("рдХреИрдорд░рд╛ рдирд╣реАрдВ рдЪрд▓рд╛")
except:
self.speak("рдирд╛рдо рдирд╣реАрдВ рд╕рдордЭ рдкрд╛рдпрд╛ред")
def list_permissions(self):
        allowed = [key for key, val in self.permissions.items() if val]
        self.speak("अभी चालू अनुमतियाँ हैं: " + ", ".join(allowed))  # "Currently enabled permissions are: ..."
def toggle_permission(self, command, status):
for key in self.features:
if key in command:
                self.permissions[key] = status
                state = "चालू" if status else "बंद"  # "on" / "off"
                self.speak(f"{key} की अनुमति {state} कर दी गई है।")  # "Permission for {key} has been turned {state}."
                return
        self.speak("यह फीचर नहीं मिला।")  # "That feature was not found."
# System Feature Methods
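    # The methods below shell out via os.system: xdg-open targets a desktop Linux browser,
    # while the "am start ..." and "pm list packages" calls assume an Android environment
    # with shell access (e.g. Termux or adb); they are unlikely to work from a normal
    # sandboxed Kivy APK and will simply fail on other platforms.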
    def open_browser(self):
        self.speak("ब्राउज़र खोल रहा हूँ")
        os.system("xdg-open https://www.google.com")
    def bluetooth_on(self):
        os.system("rfkill unblock bluetooth")
        self.speak("ब्लूटूथ चालू किया गया")
    def bluetooth_off(self):
        os.system("rfkill block bluetooth")
        self.speak("ब्लूटूथ बंद किया गया")
    def open_whatsapp(self):
        self.speak("व्हाट्सएप खोल रहा हूँ")
        os.system("am start -n com.whatsapp/.HomeActivity")
    def open_youtube(self):
        self.speak("यूट्यूब खोल रहा हूँ")
        os.system("am start -n com.google.android.youtube/.HomeActivity")
    def open_gallery(self):
        self.speak("गैलरी खोल रहा हूँ")
        os.system("am start -a android.intent.action.VIEW -t image/*")
    def open_files(self):
        self.speak("फाइल्स खोल रहा हूँ")
        os.system("am start -a android.intent.action.VIEW -t resource/folder")
    def open_music(self):
        self.speak("संगीत ऐप खोल रहा हूँ")
        os.system("am start -a android.intent.action.MUSIC_PLAYER")
    def open_contacts(self):
        self.speak("कॉन्टैक्ट्स खोल रहा हूँ")
        os.system("am start -a android.intent.action.VIEW -t vnd.android.cursor.dir/contact")
    def open_call_logs(self):
        self.speak("कॉल लॉग्स खोल रहा हूँ")
        os.system("am start -a android.intent.action.VIEW -t vnd.android.cursor.dir/calls")
    def open_sms(self):
        self.speak("SMS ऐप खोल रहा हूँ")
        os.system("am start -a android.intent.action.MAIN -t vnd.android-dir/mms-sms")
    def open_calendar(self):
        self.speak("कैलेंडर खोल रहा हूँ")
        os.system("am start -a android.intent.action.VIEW -t vnd.android.cursor.dir/event")
def list_installed_apps(self):
self.speak("рдЖрдкрдХреЗ рдлрд╝реЛрди рдореЗрдВ рдЗрдВрд╕реНрдЯреЙрд▓реНрдб рдРрдкреНрд╕ рдЦреЛрдЬ рд░рд╣рд╛ рд╣реВрдБред")
try:
output = subprocess.check_output(\["pm", "list", "packages"\])
packages = output.decode().splitlines()
app_list = \[pkg.split(":")\[1\] for pkg in packages\]
self.speak(f"рдЖрдкрдХреЗ рдлреЛрди рдореЗрдВ {len(app_list)} рдРрдкреНрд╕ рд╣реИрдВред")
except:
self.speak("рдРрдкреНрд╕ рд▓рд┐рд╕реНрдЯ рдирд╣реАрдВ рдХрд░ рдкрд╛рдпрд╛ред")
def set_reminder(self):
self.speak("рдХреНрдпрд╛ рдпрд╛рдж рджрд┐рд▓рд╛рдирд╛ рд╣реИ?")
with sr.Microphone() as source:
audio = self.recognizer.listen(source, timeout=5)
try:
reminder = self.recognizer.recognize_google(audio, language="hi-IN")
self.memory.setdefault("reminders", \[\]).append(reminder)
self.save_memory()
self.speak(f"рдпрд╛рдж рд░рдЦреВрдБрдЧрд╛: {reminder}")
except:
self.speak("рдпрд╛рдж рдирд╣реАрдВ рд░рдЦ рдкрд╛рдпрд╛ред")
def set_timer(self):
self.speak("рдХрд┐рддрдиреЗ рд╕реЗрдХрдВрдб рдХрд╛ рдЯрд╛рдЗрдорд░ рд╕реЗрдЯ рдХрд░рдирд╛ рд╣реИ?")
with sr.Microphone() as source:
audio = self.recognizer.listen(source, timeout=5)
try:
seconds = int(self.recognizer.recognize_google(audio, language="hi-IN"))
threading.Thread(target=self.timer_countdown, args=(seconds,), daemon=True).start()
self.speak(f"{seconds} рд╕реЗрдХрдВрдб рдХрд╛ рдЯрд╛рдЗрдорд░ рдЪрд╛рд▓реВ")
except:
self.speak("рдЯрд╛рдЗрдорд░ рд╕реЗрдЯ рдирд╣реАрдВ рдХрд░ рдкрд╛рдпрд╛ред")
def timer_countdown(self, seconds):
time.sleep(seconds)
self.speak("рдЯрд╛рдЗрдорд░ рд╕рдорд╛рдкреНрдд")
def list_features(self):
features_list = ", ".join(self.features.keys())
self.speak(f"рдореЗрд░реЗ рдЕрдВрджрд░ рдпреЗ рд╕рднреА рдлреАрдЪрд░реНрд╕ рд╣реИрдВ: {features_list}")
def show_memory(self):
if "reminders" in self.memory:
            reminders = ", ".join(self.memory["reminders"])
            self.speak(f"आपने मुझे ये याद दिलाने को कहा था: {reminders}")  # "You asked me to remind you about: ..."
        else:
            self.speak("कोई याद नहीं मिली।")  # "No reminders were found."
class RagfistAIApp(App):
def build(self):
return RagfistCore()
if __name__ == '__main__':
    RagfistAIApp().run()