```python
import os
import time

import gradio as gr
import requests
from huggingface_hub import InferenceClient

# Inference client for the Open-Sora-v2 text-to-video model.
# NOTE: huggingface_hub picks up the HF_TOKEN environment variable automatically.
client = InferenceClient("hpcai-tech/Open-Sora-v2")


def generate_video(prompt, duration, style):
    """Generate a video from a text prompt using Open-Sora-v2.

    Args:
        prompt: Text description of the desired video.
        duration: Clip length in seconds; converted to frames at an
            assumed 10 fps.
        style: Visual style chosen in the UI. (Bug fix: the original
            accepted this parameter but never used it — it is now folded
            into the prompt so the model actually sees it.)

    Returns:
        Path of the saved .mp4 file on success, or an error string on
        failure (the caller displays whatever is returned).
    """
    try:
        # Fold the style selection into the prompt text.
        styled_prompt = f"{prompt}, {style.lower()} style" if style else prompt

        # Call the Open-Sora-v2 model.
        response = client.text_to_video(
            prompt=styled_prompt,
            num_frames=int(duration) * 10,  # assuming 10 fps
            negative_prompt="low quality, blurry",
            guidance_scale=9.0,
            num_inference_steps=25,
        )

        # Save the returned raw video bytes locally with a unique name.
        video_path = f"generated_video_{int(time.time())}.mp4"
        with open(video_path, "wb") as f:
            f.write(response)

        return video_path
    except Exception as e:
        return f"Error generating video: {str(e)}"


# Gradio interface
with gr.Blocks(title="VidSora Magic Wand", theme=gr.themes.Soft()) as demo:
    with gr.Row():
        gr.Markdown("""
        # VidSora Magic Wand ✨
        ### Transform your text into captivating videos instantly
        """)

    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(
                label="Describe your video",
                placeholder="A cat dancing on the moon with a purple background...",
                lines=3,
            )
            with gr.Row():
                duration = gr.Slider(
                    label="Duration (seconds)",
                    minimum=1,
                    maximum=15,
                    value=5,
                    step=1,
                )
                style = gr.Dropdown(
                    label="Style",
                    choices=["Realistic", "Cartoon", "3D Animation", "Watercolor"],
                    value="Cartoon",
                )
            generate_btn = gr.Button("Generate Video", variant="primary")

        with gr.Column():
            video_output = gr.Video(label="Generated Video", autoplay=True)
            # Bug fix: the file component must exist in the layout — the
            # original created a new gr.File inside the click() outputs,
            # which is invalid in Gradio Blocks. It starts hidden and is
            # revealed after a successful generation.
            download_file = gr.File(label="Download Video", visible=False)

    # Feature highlights
    with gr.Row():
        with gr.Column():
            gr.Markdown("### ✨ Lightning Fast")
            gr.Markdown("Generate videos in seconds with Open-Sora-v2 AI")
        with gr.Column():
            gr.Markdown("### 🎨 Customizable")
            gr.Markdown("Control duration and style for your creative vision")
        with gr.Column():
            gr.Markdown("### 🏆 High Quality")
            gr.Markdown("Professional-grade videos powered by HPC-AI")

    # Footer
    gr.Markdown("---")
    gr.Markdown("© 2024 VidSora Magic Wand | Powered by Open-Sora-v2 from HPC-AI")

    # Event handlers
    def expose_download(video_path):
        """Reveal the download component only when generation produced a file."""
        if isinstance(video_path, str) and video_path.endswith(".mp4"):
            return gr.update(value=video_path, visible=True)
        return gr.update(visible=False)

    generate_btn.click(
        fn=generate_video,
        inputs=[prompt, duration, style],
        outputs=video_output,
    ).then(
        fn=expose_download,
        inputs=video_output,
        outputs=download_file,
    )

if __name__ == "__main__":
    demo.launch(share=True)
```

This implementation:

1. Uses Gradio for the web interface
2. Integrates with the Open-Sora-v2 model via HuggingFace Inference API
3. Provides text-to-video generation with configurable duration and style (the style selection is appended to the prompt sent to the model)
4. Includes download functionality for generated videos via a hidden `gr.File` component that is revealed after a successful generation
5. Maintains the same core features from the original design

To run this:

1. Install requirements: `pip install gradio requests huggingface-hub`
2. Set your HuggingFace token: `export HF_TOKEN=your_token_here`
3. Run the app: `python gradio_app.py`

The app will launch a local server and provide a shareable link. For production deployment, you would:

1. Containerize with Docker
2. Deploy on HuggingFace Spaces, AWS, or similar
3. Add authentication if needed
4. Implement rate limiting and caching