7 changes: 7 additions & 0 deletions langchain/agents/agent.py
```diff
@@ -12,6 +12,7 @@
 from langchain.chains.llm import LLMChain
 from langchain.input import get_color_mapping
 from langchain.llms.base import LLM
+from langchain.logging import get_logger
 from langchain.prompts.base import BasePromptTemplate
 from langchain.schema import AgentAction
```

```diff
@@ -147,7 +148,13 @@ def _call(self, inputs: Dict[str, str]) -> Dict[str, str]:
             if output.tool in name_to_tool_map:
                 chain = name_to_tool_map[output.tool]
                 # We then call the tool on the tool input to get an observation
+                get_logger().log_tool_run_start(
+                    {"name": str(chain)[:60] + "..."},
+                    output.tool,
+                    {"input": output.tool_input},
+                )
                 observation = chain(output.tool_input)
+                get_logger().log_tool_run_end({"output": observation})
                 color = color_mapping[output.tool]
             else:
                 observation = f"{output.tool} is not a valid tool, try another one."
```
3 changes: 3 additions & 0 deletions langchain/chains/base.py
```diff
@@ -5,6 +5,7 @@
 from pydantic import BaseModel, Extra, Field

 import langchain
+from langchain.logging import get_logger


 class Memory(BaseModel, ABC):
```

```diff
@@ -104,7 +105,9 @@ def __call__(
             print(
                 f"\n\n\033[1m> Entering new {self.__class__.__name__} chain...\033[0m"
             )
+        get_logger().log_chain_run_start({"name": self.__class__.__name__}, inputs)
         outputs = self._call(inputs)
+        get_logger().log_chain_run_end({"outputs": outputs})
         if self.verbose:
             print(f"\n\033[1m> Finished {self.__class__.__name__} chain.\033[0m")
         self._validate_outputs(outputs)
```
1 change: 1 addition & 0 deletions langchain/chains/llm_math/base.py
```diff
@@ -1,4 +1,5 @@
 """Chain that interprets a prompt and executes python code to do math."""
+import time
 from typing import Dict, List

 from pydantic import BaseModel, Extra
```
6 changes: 5 additions & 1 deletion langchain/llms/base.py
```diff
@@ -8,6 +8,7 @@
 from pydantic import BaseModel, Extra

 import langchain
+from langchain.logging import get_logger
 from langchain.schema import Generation
```


```diff
@@ -45,7 +46,10 @@ def generate(
     ) -> LLMResult:
         """Run the LLM on the given prompt and input."""
         if langchain.llm_cache is None:
-            return self._generate(prompts, stop=stop)
+            get_logger().log_llm_run_start({"name": self.__class__.__name__}, prompts)
+            output = self._generate(prompts, stop=stop)
+            get_logger().log_llm_run_end(output.generations)
+            return output
         params = self._llm_dict()
         params["stop"] = stop
         llm_string = str(sorted([(k, v) for k, v in params.items()]))
```

**Collaborator** (on the new `self._generate` call): We also call generate down below when the cache exists but some things aren't in the cache; we'll want to put logging there as well.
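The reviewer's point applies further down in `generate`: when a cache exists but some prompts miss it, `_generate` is called again for just the misses, and that call isn't wrapped yet. One way to keep both call sites consistent is a small helper; a minimal sketch, assuming this PR's `get_logger` (the helper name is illustrative, not part of the diff):

```python
from typing import Any, Callable, List, Optional

from langchain.logging import get_logger


def logged_generate(
    name: str,
    generate_fn: Callable[..., Any],
    prompts: List[str],
    stop: Optional[List[str]] = None,
) -> Any:
    """Wrap a raw _generate-style call with run start/end logging."""
    get_logger().log_llm_run_start({"name": name}, prompts)
    output = generate_fn(prompts, stop=stop)  # an LLMResult in practice
    get_logger().log_llm_run_end(output.generations)
    return output
```

Both the no-cache branch and the cache-miss branch could then route through this one wrapper.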
6 changes: 6 additions & 0 deletions langchain/logging/__init__.py
@@ -0,0 +1,6 @@
```python
from langchain.logging.base import BaseLogger
from langchain.logging.sqlite import SqliteLogger


def get_logger() -> BaseLogger:
    return SqliteLogger()
```
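Note that `get_logger` constructs a fresh `SqliteLogger` on every call, so callers hold no shared logger object; runs presumably persist in the SQLite database (the `SqliteLogger` implementation isn't shown in this diff). A usage sketch under that assumption, with made-up values:

```python
from langchain.logging import get_logger

logger = get_logger()
logger.log_chain_run_start({"name": "LLMMathChain"}, {"question": "what is 2 + 2"})
logger.log_chain_run_end({"outputs": {"answer": "4"}})

# A separate get_logger() call should see the same runs, assuming
# SqliteLogger persists them to a shared database file.
for run in get_logger().get_chain_runs(top_level_only=True):
    print(run.id, run.serialized)
```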
105 changes: 105 additions & 0 deletions langchain/logging/base.py
@@ -0,0 +1,105 @@
```python
"""Base interface for logging runs."""
from abc import ABC, abstractmethod
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Union

from dataclasses_json import dataclass_json


@dataclass_json
@dataclass
class Run:
    id: int
    start_time: datetime
    end_time: datetime
    extra: Dict[str, Any]
    error: Dict[str, Any]
    execution_order: int
    serialized: Dict[str, Any]


@dataclass_json
@dataclass
class LLMRun(Run):
    prompts: Dict[str, Any]
    response: Dict[str, Any]


@dataclass_json
@dataclass
class ChainRun(Run):
    inputs: Dict[str, Any]
    outputs: Dict[str, Any]
    child_runs: List[Run]


@dataclass_json
@dataclass
class ToolRun(Run):
    inputs: Dict[str, Any]
    outputs: Dict[str, Any]
    action: str
    child_runs: List[Run]
```

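Since these classes are decorated with `@dataclass_json`, they pick up `to_json`/`from_json` (and `to_dict`/`from_dict`) methods from the `dataclasses_json` package. A quick round-trip illustration, assuming the definitions above (all values made up):

```python
from datetime import datetime

run = ChainRun(
    id=1,
    start_time=datetime(2022, 12, 23, 10, 0, 0),
    end_time=datetime(2022, 12, 23, 10, 0, 5),
    extra={},
    error={},
    execution_order=1,
    serialized={"name": "LLMMathChain"},
    inputs={"question": "what is 2 + 2"},
    outputs={"answer": "4"},
    child_runs=[],
)

as_json = run.to_json()  # datetimes are encoded as POSIX timestamps
restored = ChainRun.from_json(as_json)  # decoded back as UTC datetimes
```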

```python
class BaseLogger(ABC):
    """Base interface for logging runs."""

    @abstractmethod
    def log_llm_run_start(
        self, serialized: Dict[str, Any], prompts: List[str], **extra: str
    ) -> None:
        """Log the start of an LLM run."""
```

**Collaborator** (on the `log_llm_run_start` signature): Is `Dict[str, Any]` the best form? What goes in `extra`?

**@agola11** (Owner, Dec 23, 2022): Not sure what the best type is for serialized objects. I was assuming they get serialized as JSON, in which case `Dict[str, Any]` could work. `extra` is for any extra fields that you might want to store. I left it in there for added flexibility, but it's not used right now.
```python
    @abstractmethod
    def log_llm_run_end(self, response: Dict[str, Any], error=None) -> None:
        """Log the end of an LLM run."""
```

**Collaborator:** Practically, is `error` ever used currently?

**Collaborator:** Not sure `response` is the best type here; should it just be `LLMResult`?

**@agola11** (Owner): `error` is not used right now, but in the future it would be good to log the error of a particular run if it didn't succeed. Agree on `LLMResult`.

```python
    @abstractmethod
    def log_chain_run_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **extra: str
    ) -> None:
        """Log the start of a chain run."""

    @abstractmethod
    def log_chain_run_end(self, outputs: Dict[str, Any], error=None) -> None:
        """Log the end of a chain run."""

    @abstractmethod
    def log_tool_run_start(
        self,
        serialized: Dict[str, Any],
        action: str,
        inputs: Dict[str, Any],
        **extra: str
    ) -> None:
        """Log the start of a tool run."""
```

**Collaborator** (on the `inputs` parameter): Right now, tool inputs/outputs are assumed to be a single string rather than multiple things. If we want to allow multiple values here, we should change that in the code as well.

**@agola11** (Owner): Makes sense, I'll change it to a single string.

```python
    @abstractmethod
    def log_tool_run_end(self, outputs: Dict[str, Any], error=None) -> None:
        """Log the end of a tool run."""

    @abstractmethod
    def get_llm_runs(self, top_level_only: bool = False) -> List[LLMRun]:
        """Return all the LLM runs."""

    @abstractmethod
    def get_chain_runs(self, top_level_only: bool = False) -> List[ChainRun]:
        """Return all the chain runs."""

    @abstractmethod
    def get_tool_runs(self, top_level_only: bool = False) -> List[ToolRun]:
        """Return all the tool runs."""

    @abstractmethod
    def get_llm_run(self, run_id: int) -> LLMRun:
        """Return a specific LLM run."""

    @abstractmethod
    def get_chain_run(self, run_id: int) -> ChainRun:
        """Return a specific chain run."""

    @abstractmethod
    def get_tool_run(self, run_id: int) -> ToolRun:
        """Return a specific tool run."""
```