79777259

Date: 2025-09-28 10:30:04
Score: 1.5
Natty:
Report link
import streamlit as st
import time
import uuid
from datetime import datetime
import json

# Page configuration
st.set_page_config(
    page_title="AI ChatBot Assistant", 
    page_icon="🤖",
    layout="wide",
    initial_sidebar_state="expanded"
)

# Custom CSS for ChatGPT-like styling
st.markdown("""
<style>
.main-container { max-width: 1200px; margin: 0 auto; }
.chat-message { padding: 1rem; border-radius: 10px; margin-bottom: 1rem; word-wrap: break-word; }
.user-message { background-color: #f0f0f0; margin-left: 20%; border: 1px solid #ddd; }
.assistant-message { background-color: #e3f2fd; margin-right: 20%; border: 1px solid #bbdefb; }
.chat-header { text-align: center; padding: 1rem 0; border-bottom: 2px solid #e0e0e0; margin-bottom: 2rem; }
.sidebar-content { padding: 1rem 0; }
.input-container { position: sticky; bottom: 0; background-color: white; padding: 1rem 0; border-top: 1px solid #e0e0e0; }
.action-button { background-color: #1976d2; color: white; border: none; padding: 0.5rem 1rem; border-radius: 5px; cursor: pointer; margin: 0.25rem; }
.action-button:hover { background-color: #1565c0; }
.speech-button { background-color: #4caf50; color: white; border: none; padding: 0.75rem; border-radius: 50%; cursor: pointer; font-size: 1.2rem; margin-left: 0.5rem; }
.speech-button:hover { background-color: #45a049; }
.speech-button.listening { background-color: #f44336; animation: pulse 1s infinite; }
@keyframes pulse { 0% { opacity: 1; } 50% { opacity: 0.5; } 100% { opacity: 1; } }
.status-indicator { padding: 0.5rem; border-radius: 5px; margin: 0.5rem 0; text-align: center; }
.status-listening { background-color: #ffebee; color: #c62828; }
.status-processing { background-color: #fff3e0; color: #ef6c00; }
.status-ready { background-color: #e8f5e8; color: #2e7d32; }
.chat-stats { background-color: #f5f5f5; padding: 1rem; border-radius: 10px; margin: 1rem 0; }
.export-button { background-color: #ff9800; color: white; border: none; padding: 0.5rem 1rem; border-radius: 5px; cursor: pointer; width: 100%; margin: 0.5rem 0; }
.export-button:hover { background-color: #f57c00; }
</style>
""", unsafe_allow_html=True)

# --- Unified Voice + Text Input ---
def speech_to_text_component():
    speech_html = """
    <div id="speech-container">
        <div style="display: flex; align-items: center; gap: 10px; margin-bottom: 20px;">
            <input type="text" id="speechResult" placeholder="Speak or type your message..." 
                   style="flex: 1; padding: 12px; border: 2px solid #ddd; border-radius: 8px; font-size: 16px;">
            <button id="speechButton" onclick="toggleSpeechRecognition()" 
                    style="padding: 12px; background-color: #4caf50; color: white; border: none; 
                           border-radius: 50%; cursor: pointer; font-size: 18px; width: 50px; height: 50px;">
                🎤
            </button>
        </div>
        <div id="speechStatus" style="padding: 8px; border-radius: 5px; text-align: center; 
                                      background-color: #e8f5e8; color: #2e7d32; margin-bottom: 10px;">
            Ready to listen - Click the microphone to start
        </div>
        <button onclick="submitSpeechText()" id="submitButton"
                style="padding: 12px 24px; background-color: #1976d2; color: white; border: none; 
                       border-radius: 8px; cursor: pointer; font-size: 16px; width: 100%;">
            Send Message
        </button>
    </div>
    <script>
    let recognition;
    let isListening = false;

    if ('webkitSpeechRecognition' in window || 'SpeechRecognition' in window) {
        const SpeechRecognition = window.SpeechRecognition || window.webkitSpeechRecognition;
        recognition = new SpeechRecognition();
        recognition.continuous = false;
        recognition.interimResults = true;
        recognition.lang = 'en-US';

        recognition.onstart = function() {
            isListening = true;
            document.getElementById('speechButton').innerHTML = '🔴';
            document.getElementById('speechButton').style.backgroundColor = '#f44336';
            document.getElementById('speechStatus').innerHTML = 'Listening... Speak now!';
            document.getElementById('speechStatus').className = 'status-listening';
            document.getElementById('speechStatus').style.backgroundColor = '#ffebee';
            document.getElementById('speechStatus').style.color = '#c62828';
        };

        recognition.onresult = function(event) {
            let transcript = '';
            for (let i = 0; i < event.results.length; i++) {
                transcript += event.results[i][0].transcript;
            }
            document.getElementById('speechResult').value = transcript;
            if (event.results[event.results.length - 1].isFinal) {
                document.getElementById('speechStatus').innerHTML = 'Speech captured! Click Send or Enter.';
                document.getElementById('speechStatus').className = 'status-ready status-indicator';
            }
        };

        recognition.onerror = function(event) {
            document.getElementById('speechStatus').innerHTML = 'Error: ' + event.error;
            document.getElementById('speechStatus').className = 'status-listening status-indicator';
            resetSpeechButton();
        };

        recognition.onend = function() {
            resetSpeechButton();
        };
    } else {
        document.getElementById('speechStatus').innerHTML = 'Speech recognition not supported in this browser';
        document.getElementById('speechButton').disabled = true;
    }

    function resetSpeechButton() {
        isListening = false;
        document.getElementById('speechButton').innerHTML = '🎤';
        document.getElementById('speechButton').style.backgroundColor = '#4caf50';
        if (document.getElementById('speechResult').value.trim() === '') {
            document.getElementById('speechStatus').innerHTML = 'Ready to listen - Click the microphone to start';
            document.getElementById('speechStatus').className = 'status-indicator status-ready';
        }
    }

    function toggleSpeechRecognition() {
        if (recognition) {
            if (isListening) {
                recognition.stop();
            } else {
                recognition.start();
            }
        }
    }

    function submitSpeechText() {
        const text = document.getElementById('speechResult').value.trim();
        if (text) {
            window.parent.postMessage({
                type: 'streamlit:setComponentValue',
                value: text
            }, '*');
            document.getElementById('speechResult').value = '';
            document.getElementById('speechStatus').innerHTML = 'Message sent! Ready for next input.';
            document.getElementById('speechStatus').className = 'status-indicator status-ready';
            resetSpeechButton();
        } else {
            document.getElementById('speechStatus').innerHTML = 'Please speak or type a message first.';
            document.getElementById('speechStatus').className = 'status-listening status-indicator';
        }
    }

    document.getElementById('speechResult').addEventListener('keypress', function(e) {
        if (e.key === 'Enter') {
            submitSpeechText();
        }
    });
    </script>
    """
    return st.components.v1.html(speech_html, height=200)

def initialize_session_state():
    if "messages" not in st.session_state:
        st.session_state.messages = [
            {"role": "assistant", "content": "👋 Hello! I'm your AI assistant. How can I help you today?", "timestamp": datetime.now()}
        ]
    if "session_id" not in st.session_state:
        st.session_state.session_id = str(uuid.uuid4())
    if "user_name" not in st.session_state:
        st.session_state.user_name = "User"
    if "chat_count" not in st.session_state:
        st.session_state.chat_count = 0

def generate_ai_response(user_input):
    time.sleep(1)
    responses = {
        "hello": "Hello! Great to meet you! How can I assist you today?",
        "help": "I'm here to help! You can ask me questions, have a conversation, or use voice input by clicking the microphone button.",
        "how are you": "I'm doing great, thank you for asking! I'm ready to help with whatever you need.",
        "voice": "Yes! I support voice input. Just click the microphone button and speak your message.",
        "features": "I support text and voice input, conversation history, message export, and more. What would you like to explore?",
    }
    if isinstance(user_input, str):
        user_lower = user_input.lower()
        for key, response in responses.items():
            if key in user_lower:
                return response
        return f"Thanks for your message: '{user_input}'. This is a demo response. In a real application, connect to an AI service here."
    else:
        return "Sorry, I didn't understand that input."

def export_chat_history():
    export_data = {
        "session_id": st.session_state.session_id,
        "user_name": st.session_state.user_name,
        "export_time": datetime.now().isoformat(),
        "message_count": len(st.session_state.messages),
        "messages": [
            {
                "role": msg["role"],
                "content": msg["content"],
                "timestamp": msg["timestamp"].isoformat() if "timestamp" in msg else None
            }
            for msg in st.session_state.messages
        ]
    }
    return json.dumps(export_data, indent=2)

def main():
    initialize_session_state()
    # Header
    st.markdown('<div class="chat-header">', unsafe_allow_html=True)
    st.title("🤖 AI ChatBot Assistant")
    st.markdown("*Advanced chat interface with voice input capabilities*")
    st.markdown('</div>', unsafe_allow_html=True)

    # Sidebar
    with st.sidebar:
        st.markdown('<div class="sidebar-content">', unsafe_allow_html=True)
        st.header("âš™ī¸ Chat Settings")
        user_name = st.text_input("Your Name:", value=st.session_state.user_name)
        if user_name != st.session_state.user_name:
            st.session_state.user_name = user_name
        st.divider()
        st.subheader("📊 Chat Statistics")
        st.markdown(f"""
        <div class="chat-stats">
            <p><strong>Messages:</strong> {len(st.session_state.messages)}</p>
            <p><strong>Session ID:</strong> {st.session_state.session_id[:8]}...</p>
            <p><strong>Started:</strong> Just now</p>
        </div>
        """, unsafe_allow_html=True)
        st.subheader("🔧 Chat Controls")
        if st.button("đŸ—‘ī¸ Clear Chat History", type="secondary", use_container_width=True):
            st.session_state.messages = [
                {"role": "assistant", "content": "👋 Hello! I'm your AI assistant. How can I help you today?", "timestamp": datetime.now()}
            ]
            st.rerun()
        if st.button("📤 Export Chat", type="secondary", use_container_width=True):
            exported_data = export_chat_history()
            st.download_button(
                label="💾 Download Chat History",
                data=exported_data,
                file_name=f"chat_history_{datetime.now().strftime('%Y%m%d_%H%M%S')}.json",
                mime="application/json",
                use_container_width=True
            )
        st.divider()
        st.subheader("â„šī¸ How to Use")
        st.markdown("""
        **Text Input:** Type your message and press Enter or click Send

        **Voice Input:** Click the 🎤 microphone button and speak

        **Features:**
        - Real-time speech recognition
        - Chat history preservation
        - Message export functionality
        - Responsive design
        """)
        st.markdown('</div>', unsafe_allow_html=True)

    # Main chat area
    col1, col2, col3 = st.columns([1, 6, 1])
    with col2:
        st.markdown('<div class="main-container">', unsafe_allow_html=True)
        chat_container = st.container()
        with chat_container:
            for i, message in enumerate(st.session_state.messages):
                with st.chat_message(message["role"]):
                    st.markdown(message["content"])
                    if "timestamp" in message:
                        st.caption(f"*{message['timestamp'].strftime('%H:%M:%S')}*")
        st.markdown('</div>', unsafe_allow_html=True)

    # ---- SINGLE Input Box for both text and voice ----
    st.markdown('<div class="input-container">', unsafe_allow_html=True)
    st.subheader("🎤 Voice & Text Input")
    user_input = speech_to_text_component() # This is now the ONLY input

    if user_input and isinstance(user_input, str) and user_input.strip():
        user_input = user_input.strip()
        st.session_state.messages.append({
            "role": "user", 
            "content": user_input,
            "timestamp": datetime.now()
        })
        with st.spinner("🤔 Thinking..."):
            ai_response = generate_ai_response(user_input)
            st.session_state.messages.append({
                "role": "assistant", 
                "content": ai_response,
                "timestamp": datetime.now()
            })
        st.session_state.chat_count += 1
        st.rerun()
    st.markdown('</div>', unsafe_allow_html=True)

if __name__ == "__main__":
    main()
Reasons:
  • Blacklisted phrase (0.5): thank you
  • Blacklisted phrase (0.5): Thanks
  • Blacklisted phrase (0.5): How can I
  • Long answer (-1):
  • Has code block (-0.5):
  • Self-answer (0.5):
  • Low reputation (1):
Posted by: Praveen Kumar