kandapoha · 2mo ago

```
from operator import itemgetter

from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables import RunnableLambda
from langchain_openai import ChatOpenAI

# `conditional_observe`, `SYNTHESIZER_PROMPT`, and `langfuse_handler` are defined elsewhere.


@conditional_observe()
def conversation_chain(arg1: str, arg2: str, api_key: str, prompts):
    model = ChatOpenAI(
        model="gpt-4-turbo",
        temperature=0,
        max_tokens=1000,
        model_kwargs={"frequency_penalty": 2.0},
        api_key=api_key,
    )

    # One prompt | model | parser branch per entry in `prompts`, run in parallel.
    parallel_chain = {
        f"chain_{idx}": ChatPromptTemplate.from_messages(
            [
                ("system", prompt),
                ("human", "{user_input}"),
            ]
        )
        | model
        | StrOutputParser()
        for idx, prompt in enumerate(prompts)
    }

    synthesizer_chain = (
        ChatPromptTemplate.from_messages([("system", SYNTHESIZER_PROMPT)]) | model
    )

    chain = {
        "input1": itemgetter("input1"),
        "input2": itemgetter("input2"),
        "input3": itemgetter("input3"),
        "input4": itemgetter("input4"),
        "input5": itemgetter("input5"),
        # Collect the outputs of all parallel branches into a single list.
        "input6": parallel_chain
        | RunnableLambda(
            lambda input_dict: [
                value for key, value in input_dict.items() if key.startswith("chain_")
            ]
        ),
    } | synthesizer_chain

    return chain.invoke(
        {
            "input1": "...",
            "input2": "...",
            "input3": "...",
            "input4": "...",
            "input5": "...",
        },
        config={"callbacks": [langfuse_handler]},
    )
```
Is there a way to disable tracing of the input and output values for all the nested spans/Runnables in the chain?
2 Replies
kandapoha · 2mo ago
I've also created a discussion for this: https://github.com/orgs/langfuse/discussions/2706
GitHub
Disable tracing of input and output for nested elements within a c...
We are using the decorator API and the LangChain integration. We are also self-hosting Langfuse, currently on v2.63.1. We wanted to, in specific cases, NOT trace the input and output for all of our s...
Marc · 2mo ago
Thanks for moving this to GitHub, which is the best place to have in-depth discussions to figure this out together. Happy to help make this work!
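
A minimal sketch of the two knobs that look relevant to the question, assuming a recent Langfuse Python SDK v2: the `capture_input`/`capture_output` flags on `@observe` suppress I/O only on the decorated observation itself, while a client-level `mask` callable (if the installed SDK version supports it) is applied to the input and output of every observation, including the nested LangChain runs created by the callback handler. The names `mask_everything` and `my_chain` below are hypothetical, and this is not a confirmed answer; see the linked GitHub discussion for the authoritative resolution.

```
from langfuse.decorators import langfuse_context, observe


def mask_everything(data):
    # Hypothetical masking callback: replace every traced payload with a placeholder.
    return "REDACTED"


# Assumption: recent SDK versions accept a `mask` callable that is applied to the
# input/output of every observation, including nested LangChain runs.
langfuse_context.configure(mask=mask_everything)


@observe(capture_input=False, capture_output=False)  # affects only this decorated span
def my_chain(user_input: str) -> str:
    # Handler scoped to the current trace; nested runs still create spans,
    # but their input/output pass through the mask above.
    handler = langfuse_context.get_current_langchain_handler()
    # ... build the LCEL chain as in the snippet above and invoke it with
    # config={"callbacks": [handler]} ...
    return user_input
```

If only the top-level function's I/O needs to be hidden, the two decorator flags alone should be enough; the mask route is the one that would reach the nested Runnables, which is what the question asks about.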