# main.py — NiceGUI demo: Whisper voice transcription and Stable Diffusion image generation via Replicate
#!/usr/bin/env python3
import asyncio
import functools
import io
from typing import Any, Callable

import replicate  # very nice API to run AI models; see https://replicate.com/
from nicegui import ui
from nicegui.events import UploadEventArguments
  9. async def io_bound(callback: Callable, *args: any, **kwargs: any):
  10. '''Makes a blocking function awaitable; pass function as first parameter and its arguments as the rest'''
  11. return await asyncio.get_event_loop().run_in_executor(None, functools.partial(callback, *args, **kwargs))
  12. async def transcribe(e: UploadEventArguments):
  13. transcription.text = 'Transcribing...'
  14. model = replicate.models.get('openai/whisper')
  15. version = model.versions.get('30414ee7c4fffc37e260fcab7842b5be470b9b840f2b608f5baa9bbef9a259ed')
  16. prediction = await io_bound(version.predict, audio=io.BytesIO(e.content))
  17. text = prediction.get('transcription', 'no transcription')
  18. transcription.set_text(f'result: "{text}"')
  19. async def generate_image():
  20. image.source = 'https://dummyimage.com/600x400/ccc/000000.png&text=building+image...'
  21. model = replicate.models.get('stability-ai/stable-diffusion')
  22. prediction = await io_bound(model.predict, prompt=prompt.value)
  23. image.source = prediction[0]
  24. # User Interface
  25. with ui.row().style('gap:10em'):
  26. with ui.column():
  27. ui.label('OpenAI Whisper (voice transcription)').classes('text-2xl')
  28. ui.upload(on_upload=transcribe).style('width: 20em')
  29. transcription = ui.label().classes('text-xl')
  30. with ui.column():
  31. ui.label('Stable Diffusion (image generator)').classes('text-2xl')
  32. prompt = ui.input('prompt').style('width: 20em')
  33. ui.button('Generate', on_click=generate_image).style('width: 15em')
  34. image = ui.image().style('width: 60em')
  35. ui.run()