langfuse.openai

If you use the OpenAI Python SDK, you can use the Langfuse drop-in replacement to get full logging by changing only the import.

```diff
- import openai
+ from langfuse.openai import openai
```

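For example, a minimal usage sketch (model name, prompt, and ids are placeholders; the extra `name` and `user_id` kwargs are Langfuse-specific and are stripped before the request reaches OpenAI — see `OpenAiArgsExtractor` in the source below):

```python
from langfuse.openai import openai  # drop-in replacement for `import openai`

completion = openai.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Say hello"}],
    name="hello-generation",  # optional Langfuse generation name
    user_id="user-123",  # optional Langfuse user id (placeholder)
)
print(completion.choices[0].message.content)
```
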
Langfuse automatically tracks:

  • All prompts/completions with support for streaming, async and functions
  • Latencies
  • API Errors
  • Model usage (tokens) and cost (USD)

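For instance, streaming responses are traced as well — the wrapper tees the stream and logs the assembled completion once it is exhausted. A sketch (placeholders as above):

```python
from langfuse.openai import openai

stream = openai.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "Count to three"}],
    stream=True,
)
for chunk in stream:
    # Consume the stream as usual; Langfuse records the full output at the end.
    if chunk.choices and chunk.choices[0].delta.content:
        print(chunk.choices[0].delta.content, end="")
```
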
The integration is fully interoperable with the observe() decorator and the low-level tracing SDK.
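
As a sketch of the `observe()` interop (function name and prompt are illustrative), the wrapped OpenAI call is nested as a generation under the decorator's trace:

```python
from langfuse.decorators import observe
from langfuse.openai import openai

@observe()  # opens a trace; the OpenAI call below becomes a child generation
def summarize(text: str):
    return openai.chat.completions.create(
        model="gpt-3.5-turbo",  # placeholder model name
        messages=[{"role": "user", "content": f"Summarize: {text}"}],
    )
```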

See docs for more details: https://langfuse.com/docs/integrations/openai

  1"""If you use the OpenAI Python SDK, you can use the Langfuse drop-in replacement to get full logging by changing only the import.
  2
  3```diff
  4- import openai
  5+ from langfuse.openai import openai
  6```
  7
  8Langfuse automatically tracks:
  9
 10- All prompts/completions with support for streaming, async and functions
 11- Latencies
 12- API Errors
 13- Model usage (tokens) and cost (USD)
 14
 15The integration is fully interoperable with the `observe()` decorator and the low-level tracing SDK.
 16
 17See docs for more details: https://langfuse.com/docs/integrations/openai
 18"""

import copy
import logging
import types
from typing import Any, List, Optional

from packaging.version import Version
from wrapt import wrap_function_wrapper

from langfuse import Langfuse
from langfuse.client import StatefulGenerationClient
from langfuse.decorators import langfuse_context
from langfuse.utils import _get_timestamp
from langfuse.utils.langfuse_singleton import LangfuseSingleton

try:
    import openai
except ImportError:
    raise ModuleNotFoundError(
        "Please install OpenAI to use this feature: 'pip install openai'"
    )

try:
    from openai import AsyncAzureOpenAI, AsyncOpenAI, AzureOpenAI, OpenAI  # noqa: F401
except ImportError:
    AsyncAzureOpenAI = None
    AsyncOpenAI = None
    AzureOpenAI = None
    OpenAI = None

log = logging.getLogger("langfuse")


class OpenAiDefinition:
    module: str
    object: str
    method: str
    type: str
    sync: bool

    def __init__(self, module: str, object: str, method: str, type: str, sync: bool):
        self.module = module
        self.object = object
        self.method = method
        self.type = type
        self.sync = sync


OPENAI_METHODS_V0 = [
    OpenAiDefinition(
        module="openai",
        object="ChatCompletion",
        method="create",
        type="chat",
        sync=True,
    ),
    OpenAiDefinition(
        module="openai",
        object="Completion",
        method="create",
        type="completion",
        sync=True,
    ),
]


OPENAI_METHODS_V1 = [
    OpenAiDefinition(
        module="openai.resources.chat.completions",
        object="Completions",
        method="create",
        type="chat",
        sync=True,
    ),
    OpenAiDefinition(
        module="openai.resources.completions",
        object="Completions",
        method="create",
        type="completion",
        sync=True,
    ),
    OpenAiDefinition(
        module="openai.resources.chat.completions",
        object="AsyncCompletions",
        method="create",
        type="chat",
        sync=False,
    ),
    OpenAiDefinition(
        module="openai.resources.completions",
        object="AsyncCompletions",
        method="create",
        type="completion",
        sync=False,
    ),
]


class OpenAiArgsExtractor:
    def __init__(
        self,
        name=None,
        metadata=None,
        trace_id=None,
        session_id=None,
        user_id=None,
        tags=None,
        parent_observation_id=None,
        langfuse_prompt=None,  # we cannot use prompt because it's an argument of the old OpenAI completions API
        **kwargs,
    ):
        self.args = {}
        self.args["name"] = name
        self.args["metadata"] = metadata
        self.args["trace_id"] = trace_id
        self.args["session_id"] = session_id
        self.args["user_id"] = user_id
        self.args["tags"] = tags
        self.args["parent_observation_id"] = parent_observation_id
        self.args["langfuse_prompt"] = langfuse_prompt
        self.kwargs = kwargs

    def get_langfuse_args(self):
        return {**self.args, **self.kwargs}

    def get_openai_args(self):
        return self.kwargs


def _langfuse_wrapper(func):
    def _with_langfuse(open_ai_definitions, initialize):
        def wrapper(wrapped, instance, args, kwargs):
            return func(open_ai_definitions, initialize, wrapped, args, kwargs)

        return wrapper

    return _with_langfuse


def _extract_chat_prompt(kwargs: Any):
    """Extracts the user input from the prompt. Returns an array of messages, or a dict with messages and functions/tools."""
    prompt = {}

    if kwargs.get("functions") is not None:
        prompt.update({"functions": kwargs["functions"]})

    if kwargs.get("function_call") is not None:
        prompt.update({"function_call": kwargs["function_call"]})

    if kwargs.get("tools") is not None:
        prompt.update({"tools": kwargs["tools"]})

    if prompt:
        # if the user provided functions or tools, send them to Langfuse together with the messages
        prompt.update(
            {
                "messages": _filter_image_data(kwargs.get("messages", [])),
            }
        )
        return prompt
    else:
        # vanilla case, only send messages in OpenAI format to Langfuse
        return _filter_image_data(kwargs.get("messages", []))


def _extract_chat_response(kwargs: Any):
    """Extracts the LLM output from the response."""
    response = {
        "role": kwargs.get("role", None),
    }

    if kwargs.get("function_call") is not None:
        response.update({"function_call": kwargs["function_call"]})

    if kwargs.get("tool_calls") is not None:
        response.update({"tool_calls": kwargs["tool_calls"]})

    response.update(
        {
            "content": kwargs.get("content", None),
        }
    )
    return response


def _get_langfuse_data_from_kwargs(
    resource: OpenAiDefinition, langfuse: Langfuse, start_time, kwargs
):
    name = kwargs.get("name", "OpenAI-generation")

    if name is None:
        name = "OpenAI-generation"

    if not isinstance(name, str):
        raise TypeError("name must be a string")

    decorator_context_observation_id = langfuse_context.get_current_observation_id()
    decorator_context_trace_id = langfuse_context.get_current_trace_id()

    trace_id = kwargs.get("trace_id", None) or decorator_context_trace_id
    if trace_id is not None and not isinstance(trace_id, str):
        raise TypeError("trace_id must be a string")

    session_id = kwargs.get("session_id", None)
    if session_id is not None and not isinstance(session_id, str):
        raise TypeError("session_id must be a string")

    user_id = kwargs.get("user_id", None)
    if user_id is not None and not isinstance(user_id, str):
        raise TypeError("user_id must be a string")

    tags = kwargs.get("tags", None)
    if tags is not None and (
        not isinstance(tags, list) or not all(isinstance(tag, str) for tag in tags)
    ):
        raise TypeError("tags must be a list of strings")

    # Update trace params in the decorator context if they are specified in the OpenAI call
    if decorator_context_trace_id:
        langfuse_context.update_current_trace(
            session_id=session_id, user_id=user_id, tags=tags
        )

    parent_observation_id = kwargs.get("parent_observation_id", None) or (
        decorator_context_observation_id
        if decorator_context_observation_id != decorator_context_trace_id
        else None
    )
    if parent_observation_id is not None and not isinstance(parent_observation_id, str):
        raise TypeError("parent_observation_id must be a string")
    if parent_observation_id is not None and trace_id is None:
        raise ValueError("parent_observation_id requires trace_id to be set")

    metadata = kwargs.get("metadata", {})

    if metadata is not None and not isinstance(metadata, dict):
        raise TypeError("metadata must be a dictionary")

    model = kwargs.get("model", None) or None

    prompt = None

    if resource.type == "completion":
        prompt = kwargs.get("prompt", None)
    elif resource.type == "chat":
        prompt = _extract_chat_prompt(kwargs)

    is_nested_trace = False
    if trace_id:
        is_nested_trace = True
        langfuse.trace(id=trace_id, session_id=session_id, user_id=user_id, tags=tags)
    else:
        trace_id = (
            decorator_context_trace_id
            or langfuse.trace(
                session_id=session_id,
                user_id=user_id,
                tags=tags,
                name=name,
                input=prompt,
                metadata=metadata,
            ).id
        )

    model_parameters = {
        "temperature": kwargs.get("temperature", 1),
        "max_tokens": kwargs.get("max_tokens", float("inf")),
        "top_p": kwargs.get("top_p", 1),
        "frequency_penalty": kwargs.get("frequency_penalty", 0),
        "presence_penalty": kwargs.get("presence_penalty", 0),
    }

    langfuse_prompt = kwargs.get("langfuse_prompt", None)

    return {
        "name": name,
        "metadata": metadata,
        "trace_id": trace_id,
        "parent_observation_id": parent_observation_id,
        "user_id": user_id,
        "start_time": start_time,
        "input": prompt,
        "model_parameters": model_parameters,
        "model": model,
        "prompt": langfuse_prompt,
    }, is_nested_trace


def _get_langfuse_data_from_sync_streaming_response(
    resource: OpenAiDefinition,
    response,
    generation: StatefulGenerationClient,
    langfuse: Langfuse,
    is_nested_trace,
):
    responses = []
    for i in response:
        responses.append(i)
        yield i

    model, completion_start_time, completion = _extract_openai_response(
        resource, responses
    )

    # Avoid the trace update if the trace ID was provided by the user.
    if not is_nested_trace:
        langfuse.trace(id=generation.trace_id, output=completion)

    _create_langfuse_update(completion, generation, completion_start_time, model=model)


async def _get_langfuse_data_from_async_streaming_response(
    resource: OpenAiDefinition,
    response,
    generation: StatefulGenerationClient,
    langfuse: Langfuse,
    is_nested_trace,
):
    responses = []
    async for i in response:
        responses.append(i)
        yield i

    model, completion_start_time, completion = _extract_openai_response(
        resource, responses
    )

    # Avoid the trace update if the trace ID was provided by the user.
    if not is_nested_trace:
        langfuse.trace(id=generation.trace_id, output=completion)

    _create_langfuse_update(completion, generation, completion_start_time, model=model)


def _create_langfuse_update(
    completion, generation: StatefulGenerationClient, completion_start_time, model=None
):
    update = {
        "end_time": _get_timestamp(),
        "output": completion,
        "completion_start_time": completion_start_time,
    }
    if model is not None:
        update["model"] = model

    generation.update(**update)


def _extract_openai_response(resource, responses):
    completion = [] if resource.type == "chat" else ""
    model = None
    completion_start_time = None

    for index, i in enumerate(responses):
        if index == 0:
            completion_start_time = _get_timestamp()

        if _is_openai_v1():
            i = i.__dict__

        model = model or i.get("model", None) or None

        choices = i.get("choices", [])

        for choice in choices:
            if _is_openai_v1():
                choice = choice.__dict__
            if resource.type == "chat":
                delta = choice.get("delta", None)

                if _is_openai_v1():
                    delta = delta.__dict__

                if delta.get("role", None) is not None:
                    completion.append(
                        {
                            "role": delta.get("role", None),
                            "function_call": None,
                            "tool_calls": None,
                            "content": None,
                        }
                    )

                elif delta.get("content", None) is not None:
                    completion[-1]["content"] = (
                        delta.get("content", None)
                        if completion[-1]["content"] is None
                        else completion[-1]["content"] + delta.get("content", None)
                    )

                elif delta.get("function_call", None) is not None:
                    completion[-1]["function_call"] = (
                        delta.get("function_call", None)
                        if completion[-1]["function_call"] is None
                        else completion[-1]["function_call"]
                        + delta.get("function_call", None)
                    )
                elif delta.get("tool_calls", None) is not None:
                    completion[-1]["tool_calls"] = (
                        delta.get("tool_calls", None)
                        if completion[-1]["tool_calls"] is None
                        else completion[-1]["tool_calls"]
                        + delta.get("tool_calls", None)
                    )
            if resource.type == "completion":
                completion += choice.get("text", None)

    def get_response_for_chat():
        if len(completion) > 0:
            if completion[-1].get("content", None) is not None:
                return completion[-1]["content"]
            elif completion[-1].get("function_call", None) is not None:
                return completion[-1]["function_call"]
            elif completion[-1].get("tool_calls", None) is not None:
                return completion[-1]["tool_calls"]
        return None

    return (
        model,
        completion_start_time,
        get_response_for_chat() if resource.type == "chat" else completion,
    )


def _get_langfuse_data_from_default_response(resource: OpenAiDefinition, response):
    model = response.get("model", None) or None

    completion = None
    if resource.type == "completion":
        choices = response.get("choices", [])
        if len(choices) > 0:
            choice = choices[-1]

            completion = choice.text if _is_openai_v1() else choice.get("text", None)
    elif resource.type == "chat":
        choices = response.get("choices", [])
        if len(choices) > 0:
            choice = choices[-1]
            completion = (
                _extract_chat_response(choice.message.__dict__)
                if _is_openai_v1()
                else choice.get("message", None)
            )

    usage = response.get("usage", None)

    return model, completion, usage.__dict__ if _is_openai_v1() else usage


def _is_openai_v1():
    return Version(openai.__version__) >= Version("1.0.0")


def _is_streaming_response(response):
    return (
        isinstance(response, types.GeneratorType)
        or isinstance(response, types.AsyncGeneratorType)
        or (_is_openai_v1() and isinstance(response, openai.Stream))
        or (_is_openai_v1() and isinstance(response, openai.AsyncStream))
    )


@_langfuse_wrapper
def _wrap(open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs):
    new_langfuse: Langfuse = initialize()

    start_time = _get_timestamp()
    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)

    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
    )
    generation = new_langfuse.generation(**generation)
    try:
        openai_response = wrapped(**arg_extractor.get_openai_args())

        if _is_streaming_response(openai_response):
            return _get_langfuse_data_from_sync_streaming_response(
                open_ai_resource,
                openai_response,
                generation,
                new_langfuse,
                is_nested_trace,
            )

        else:
            model, completion, usage = _get_langfuse_data_from_default_response(
                open_ai_resource,
                openai_response.__dict__ if _is_openai_v1() else openai_response,
            )
            generation.update(
                model=model, output=completion, end_time=_get_timestamp(), usage=usage
            )

            # Avoid the trace update if the trace ID was provided by the user.
            if not is_nested_trace:
                new_langfuse.trace(id=generation.trace_id, output=completion)

        return openai_response
    except Exception as ex:
        log.warning(ex)
        model = kwargs.get("model", None) or None
        generation.update(
            end_time=_get_timestamp(),
            status_message=str(ex),
            level="ERROR",
            model=model,
            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
        )
        raise ex


@_langfuse_wrapper
async def _wrap_async(
    open_ai_resource: OpenAiDefinition, initialize, wrapped, args, kwargs
):
    new_langfuse = initialize()
    start_time = _get_timestamp()
    arg_extractor = OpenAiArgsExtractor(*args, **kwargs)

    generation, is_nested_trace = _get_langfuse_data_from_kwargs(
        open_ai_resource, new_langfuse, start_time, arg_extractor.get_langfuse_args()
    )
    generation = new_langfuse.generation(**generation)
    try:
        openai_response = await wrapped(**arg_extractor.get_openai_args())

        if _is_streaming_response(openai_response):
            return _get_langfuse_data_from_async_streaming_response(
                open_ai_resource,
                openai_response,
                generation,
                new_langfuse,
                is_nested_trace,
            )

        else:
            model, completion, usage = _get_langfuse_data_from_default_response(
                open_ai_resource,
                openai_response.__dict__ if _is_openai_v1() else openai_response,
            )
            generation.update(
                model=model,
                output=completion,
                end_time=_get_timestamp(),
                usage=usage,
            )
            # Avoid the trace update if the trace ID was provided by the user.
            if not is_nested_trace:
                new_langfuse.trace(id=generation.trace_id, output=completion)

        return openai_response
    except Exception as ex:
        model = kwargs.get("model", None) or None
        generation.update(
            end_time=_get_timestamp(),
            status_message=str(ex),
            level="ERROR",
            model=model,
            usage={"input_cost": 0, "output_cost": 0, "total_cost": 0},
        )
        raise ex


class OpenAILangfuse:
    _langfuse: Optional[Langfuse] = None

    def initialize(self):
        self._langfuse = LangfuseSingleton().get(
            public_key=openai.langfuse_public_key,
            secret_key=openai.langfuse_secret_key,
            host=openai.langfuse_host,
            debug=openai.langfuse_debug,
            enabled=openai.langfuse_enabled,
            sdk_integration="openai",
        )

        return self._langfuse

    def flush(self):
        self._langfuse.flush()

    def register_tracing(self):
        resources = OPENAI_METHODS_V1 if _is_openai_v1() else OPENAI_METHODS_V0

        for resource in resources:
            wrap_function_wrapper(
                resource.module,
                f"{resource.object}.{resource.method}",
                _wrap(resource, self.initialize)
                if resource.sync
                else _wrap_async(resource, self.initialize),
            )

        setattr(openai, "langfuse_public_key", None)
        setattr(openai, "langfuse_secret_key", None)
        setattr(openai, "langfuse_host", None)
        setattr(openai, "langfuse_debug", None)
        setattr(openai, "langfuse_enabled", True)
        setattr(openai, "flush_langfuse", self.flush)


modifier = OpenAILangfuse()
modifier.register_tracing()


def auth_check():
    if modifier._langfuse is None:
        modifier.initialize()

    return modifier._langfuse.auth_check()


def _filter_image_data(messages: List[dict]):
    """https://platform.openai.com/docs/guides/vision?lang=python

    The messages array remains the same, but the 'image_url' is removed from the 'content' array.
    It is only removed if the value starts with 'data:image/' (base64-encoded image data).
    """
    output_messages = copy.deepcopy(messages)

    for message in output_messages:
        if message.get("content", None) is not None:
            content = message["content"]
            for index, item in enumerate(content):
                if isinstance(item, dict) and item.get("image_url", None) is not None:
                    url = item["image_url"]["url"]
                    if url.startswith("data:image/"):
                        del content[index]["image_url"]

    return output_messages
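

As `register_tracing()` above shows, importing the module patches the OpenAI SDK and exposes Langfuse configuration attributes plus a flush helper on the `openai` namespace. A minimal sketch of how these might be set (key values are placeholders; configuration via environment variables works as well):

```python
from langfuse.openai import openai

# Optional: configure the Langfuse client before the first OpenAI call.
# These attributes are created by OpenAILangfuse.register_tracing().
openai.langfuse_public_key = "pk-lf-..."  # placeholder public key
openai.langfuse_secret_key = "sk-lf-..."  # placeholder secret key
openai.langfuse_host = "https://cloud.langfuse.com"

response = openai.chat.completions.create(
    model="gpt-3.5-turbo",  # placeholder model name
    messages=[{"role": "user", "content": "ping"}],
)

# Flush buffered events before a short-lived script exits.
openai.flush_langfuse()
```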
log = <Logger langfuse (WARNING)>
class OpenAiDefinition:
53class OpenAiDefinition:
54    module: str
55    object: str
56    method: str
57    type: str
58    sync: bool
59
60    def __init__(self, module: str, object: str, method: str, type: str, sync: bool):
61        self.module = module
62        self.object = object
63        self.method = method
64        self.type = type
65        self.sync = sync
OpenAiDefinition(module: str, object: str, method: str, type: str, sync: bool)
60    def __init__(self, module: str, object: str, method: str, type: str, sync: bool):
61        self.module = module
62        self.object = object
63        self.method = method
64        self.type = type
65        self.sync = sync
module: str
object: str
method: str
type: str
sync: bool
OPENAI_METHODS_V0 = [<OpenAiDefinition object>, <OpenAiDefinition object>]
OPENAI_METHODS_V1 = [<OpenAiDefinition object>, <OpenAiDefinition object>, <OpenAiDefinition object>, <OpenAiDefinition object>]
class OpenAiArgsExtractor:
118class OpenAiArgsExtractor:
119    def __init__(
120        self,
121        name=None,
122        metadata=None,
123        trace_id=None,
124        session_id=None,
125        user_id=None,
126        tags=None,
127        parent_observation_id=None,
128        langfuse_prompt=None,  # we cannot use prompt because it's an argument of the old OpenAI completions API
129        **kwargs,
130    ):
131        self.args = {}
132        self.args["name"] = name
133        self.args["metadata"] = metadata
134        self.args["trace_id"] = trace_id
135        self.args["session_id"] = session_id
136        self.args["user_id"] = user_id
137        self.args["tags"] = tags
138        self.args["parent_observation_id"] = parent_observation_id
139        self.args["langfuse_prompt"] = langfuse_prompt
140        self.kwargs = kwargs
141
142    def get_langfuse_args(self):
143        return {**self.args, **self.kwargs}
144
145    def get_openai_args(self):
146        return self.kwargs
OpenAiArgsExtractor( name=None, metadata=None, trace_id=None, session_id=None, user_id=None, tags=None, parent_observation_id=None, langfuse_prompt=None, **kwargs)
119    def __init__(
120        self,
121        name=None,
122        metadata=None,
123        trace_id=None,
124        session_id=None,
125        user_id=None,
126        tags=None,
127        parent_observation_id=None,
128        langfuse_prompt=None,  # we cannot use prompt because it's an argument of the old OpenAI completions API
129        **kwargs,
130    ):
131        self.args = {}
132        self.args["name"] = name
133        self.args["metadata"] = metadata
134        self.args["trace_id"] = trace_id
135        self.args["session_id"] = session_id
136        self.args["user_id"] = user_id
137        self.args["tags"] = tags
138        self.args["parent_observation_id"] = parent_observation_id
139        self.args["langfuse_prompt"] = langfuse_prompt
140        self.kwargs = kwargs
args
kwargs
def get_langfuse_args(self):
142    def get_langfuse_args(self):
143        return {**self.args, **self.kwargs}
def get_openai_args(self):
145    def get_openai_args(self):
146        return self.kwargs
class OpenAILangfuse:
584class OpenAILangfuse:
585    _langfuse: Optional[Langfuse] = None
586
587    def initialize(self):
588        self._langfuse = LangfuseSingleton().get(
589            public_key=openai.langfuse_public_key,
590            secret_key=openai.langfuse_secret_key,
591            host=openai.langfuse_host,
592            debug=openai.langfuse_debug,
593            enabled=openai.langfuse_enabled,
594            sdk_integration="openai",
595        )
596
597        return self._langfuse
598
599    def flush(cls):
600        cls._langfuse.flush()
601
602    def register_tracing(self):
603        resources = OPENAI_METHODS_V1 if _is_openai_v1() else OPENAI_METHODS_V0
604
605        for resource in resources:
606            wrap_function_wrapper(
607                resource.module,
608                f"{resource.object}.{resource.method}",
609                _wrap(resource, self.initialize)
610                if resource.sync
611                else _wrap_async(resource, self.initialize),
612            )
613
614        setattr(openai, "langfuse_public_key", None)
615        setattr(openai, "langfuse_secret_key", None)
616        setattr(openai, "langfuse_host", None)
617        setattr(openai, "langfuse_debug", None)
618        setattr(openai, "langfuse_enabled", True)
619        setattr(openai, "flush_langfuse", self.flush)
def initialize(self):
587    def initialize(self):
588        self._langfuse = LangfuseSingleton().get(
589            public_key=openai.langfuse_public_key,
590            secret_key=openai.langfuse_secret_key,
591            host=openai.langfuse_host,
592            debug=openai.langfuse_debug,
593            enabled=openai.langfuse_enabled,
594            sdk_integration="openai",
595        )
596
597        return self._langfuse
def flush(cls):
599    def flush(cls):
600        cls._langfuse.flush()
def register_tracing(self):
602    def register_tracing(self):
603        resources = OPENAI_METHODS_V1 if _is_openai_v1() else OPENAI_METHODS_V0
604
605        for resource in resources:
606            wrap_function_wrapper(
607                resource.module,
608                f"{resource.object}.{resource.method}",
609                _wrap(resource, self.initialize)
610                if resource.sync
611                else _wrap_async(resource, self.initialize),
612            )
613
614        setattr(openai, "langfuse_public_key", None)
615        setattr(openai, "langfuse_secret_key", None)
616        setattr(openai, "langfuse_host", None)
617        setattr(openai, "langfuse_debug", None)
618        setattr(openai, "langfuse_enabled", True)
619        setattr(openai, "flush_langfuse", self.flush)
modifier = <OpenAILangfuse object>
def auth_check():
626def auth_check():
627    if modifier._langfuse is None:
628        modifier.initialize()
629
630    return modifier._langfuse.auth_check()