OpenAI requests (image generation in this case) drag on forever, and staring at a silent terminal while you wait is, frankly, a bit rough psychologically, so I show a loading animation while the request is in flight.
Here is the actual code.
from openai import OpenAI
from dotenv import dotenv_values
import threading
import time
import sys


def loading_animation(event):
    # Spin a simple ASCII loader until the event is set
    animation_chars = ['|', '/', '-', '\\']
    i = 0
    while not event.is_set():
        sys.stdout.write('\r' + animation_chars[i % len(animation_chars)])
        sys.stdout.flush()
        time.sleep(0.1)
        i += 1
    # Clean up: overwrite the spinner and report completion
    sys.stdout.write('\rDone! \n')


def genImage(client, response_event):
    # Request an image from DALL-E 3, then signal the animation thread to stop
    response = client.images.generate(
        model="dall-e-3",
        prompt="a black cat on an american classic car.",
        size="1024x1024",
        quality="standard",
        n=1
    )
    response_event.set()
    return response


print("process start")

config = dotenv_values(".env")
yourclient = OpenAI(api_key=config["OPENAI_API_KEY"])

# Start the spinner in a background thread before the (slow) API call
response_event = threading.Event()
animation_thread = threading.Thread(target=loading_animation, args=(response_event,))
animation_thread.start()

result = genImage(yourclient, response_event)
image_url = result.data[0].url
print(image_url)

animation_thread.join()
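One caveat with this structure: if client.images.generate raises (for example a network or authentication error), response_event is never set and the spinner thread keeps running forever. A minimal, more defensive sketch of genImage, assuming the same function and variable names as above, sets the event in a try/finally:

def genImage(client, response_event):
    try:
        return client.images.generate(
            model="dall-e-3",
            prompt="a black cat on an american classic car.",
            size="1024x1024",
            quality="standard",
            n=1
        )
    finally:
        # Always stop the animation thread, even if the request failed
        response_event.set()

With this variant, animation_thread.join() still returns promptly on failure, and the exception propagates to the caller as usual.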