| author | Jonas Gerg <joniogerg@gmail.com> | 2025-09-09 20:06:52 +0200 |
|---|---|---|
| committer | Jonas Gerg <joniogerg@gmail.com> | 2025-09-09 20:06:52 +0200 |
| commit | 3e5d3ca82193e8e8561beb9ceac9982f376d84e2 | |
| tree | 76e4c260123b68b93da2417482024ba11f9838ee /archive/2025/summer/bsc_gerg/src/llm.py | |
| parent | a910d0a3e57f4de47cf2387ac239ae8d0eaca507 | |
Add bsc_gerg
Diffstat (limited to 'archive/2025/summer/bsc_gerg/src/llm.py')
| -rw-r--r-- | archive/2025/summer/bsc_gerg/src/llm.py | 52 |
1 file changed, 52 insertions, 0 deletions
```diff
diff --git a/archive/2025/summer/bsc_gerg/src/llm.py b/archive/2025/summer/bsc_gerg/src/llm.py
new file mode 100644
index 000000000..1e11df26f
--- /dev/null
+++ b/archive/2025/summer/bsc_gerg/src/llm.py
@@ -0,0 +1,52 @@
+from typing import Tuple
+
+import backoff
+import dotenv
+from openai import AsyncOpenAI, RateLimitError
+
+client: AsyncOpenAI | None = None
+def get_openai_client() -> AsyncOpenAI:
+    global client
+    if client is None:
+        dotenv.load_dotenv()
+        client = AsyncOpenAI()
+    return client
+
+seed = 42
+
+@backoff.on_exception(backoff.expo, RateLimitError)
+async def create_completion_openai(
+    messages: list[Tuple[str, str]],
+    model: str = "gpt-4o-mini",
+    temperature=0,
+    max_completion_tokens=2048,
+    top_p=0,
+    frequency_penalty=0,
+    presence_penalty=0,
+    store=False,
+    logprobs=False,
+):
+    response = await get_openai_client().chat.completions.create(
+        model=model,
+        messages=[
+            {
+                "role": role,
+                "content": prompt
+            } for role, prompt in messages
+        ],
+        response_format={"type": "text"},
+        temperature=temperature,
+        max_completion_tokens=max_completion_tokens,
+        top_p=top_p,
+        frequency_penalty=frequency_penalty,
+        presence_penalty=presence_penalty,
+        store=store,
+        logprobs=logprobs,
+        top_logprobs=5 if logprobs else None,
+        seed=seed,
+    )
+
+    if logprobs:
+        return response.choices[0].message.content, response.choices[0].logprobs
+    else:
+        return response.choices[0].message.content
```
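For context, a minimal sketch of how this helper might be driven. The import path, prompt text, and `main` wrapper are illustrative assumptions and not part of the commit; the helper itself expects `OPENAI_API_KEY` to be available in the environment or a `.env` file, which `dotenv.load_dotenv()` picks up on first use.

```python
import asyncio

# Hypothetical import path, assuming src/ is on PYTHONPATH.
from llm import create_completion_openai


async def main() -> None:
    # Messages are passed as (role, prompt) tuples and converted to the
    # OpenAI chat format inside create_completion_openai.
    messages = [
        ("system", "You are a concise assistant."),
        ("user", "Summarize what a rate limiter does in one sentence."),
    ]

    # Default call: deterministic settings (temperature=0, fixed seed=42),
    # retried with exponential backoff on RateLimitError; returns a string.
    text = await create_completion_openai(messages)
    print(text)

    # With logprobs=True the helper returns a (content, logprobs) tuple,
    # where logprobs includes the top-5 alternatives per token.
    text, logprobs = await create_completion_openai(messages, logprobs=True)
    print(text, logprobs)


if __name__ == "__main__":
    asyncio.run(main())
```

Because `get_openai_client()` initializes the `AsyncOpenAI` client lazily and caches it in a module-level global, repeated calls across a batch of requests reuse one client and its connection pool.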