[WIP] logging schema #1
@@ -0,0 +1,6 @@
from langchain.logging.base import BaseLogger
from langchain.logging.sqlite import SqliteLogger


def get_logger() -> BaseLogger:
    return SqliteLogger()
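For reference, a hedged sketch of how downstream code might obtain a logger through this factory. The module path langchain.logging is inferred from the imports above and is otherwise an assumption.

# Hypothetical usage sketch; the factory currently hard-codes SqliteLogger.
from langchain.logging import get_logger
from langchain.logging.base import BaseLogger

logger = get_logger()
assert isinstance(logger, BaseLogger)  # concretely a SqliteLogger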
@@ -0,0 +1,105 @@
| """Base interface for logging runs.""" | ||
| from abc import ABC, abstractmethod | ||
| from dataclasses import dataclass | ||
| from datetime import datetime | ||
| from typing import Any, Dict, List, Union | ||
| | ||
| from dataclasses_json import dataclass_json | ||
| | ||
| | ||
| @dataclass_json | ||
| @dataclass | ||
| class Run: | ||
| id: int | ||
| start_time: datetime | ||
| end_time: datetime | ||
| extra: Dict[str, Any] | ||
| error: Dict[str, Any] | ||
| execution_order: int | ||
| serialized: Dict[str, Any] | ||
| | ||
| | ||
| @dataclass_json | ||
| @dataclass | ||
| class LLMRun(Run): | ||
| prompts: Dict[str, Any] | ||
| response: Dict[str, Any] | ||
| | ||
| | ||
| @dataclass_json | ||
| @dataclass | ||
| class ChainRun(Run): | ||
| inputs: Dict[str, Any] | ||
| outputs: Dict[str, Any] | ||
| child_runs: List[Run] | ||
| | ||
| | ||
| @dataclass_json | ||
| @dataclass | ||
| class ToolRun(Run): | ||
| inputs: Dict[str, Any] | ||
| outputs: Dict[str, Any] | ||
| action: str | ||
| child_runs: List[Run] | ||
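To make the run schema concrete, here is a small illustrative example of building an LLMRun and serializing it with the to_json() helper that @dataclass_json adds. All field values below are invented for illustration.

from datetime import datetime

# Illustrative values only; in practice the logger backend fills these in.
run = LLMRun(
    id=1,
    start_time=datetime(2022, 11, 1, 12, 0, 0),
    end_time=datetime(2022, 11, 1, 12, 0, 2),
    extra={},
    error={},
    execution_order=1,
    serialized={"name": "example-llm"},
    prompts={"prompts": ["What is 2 + 2?"]},
    response={"generations": [["4"]]},
)
print(run.to_json())  # added by the @dataclass_json decorator

Since prompts and response are typed as Dict[str, Any], the wrapping dict shapes above are only a guess at how list-valued data would be stored.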


class BaseLogger(ABC):
    """Base interface for logging runs."""

    @abstractmethod
    def log_llm_run_start(
        self, serialized: Dict[str, Any], prompts: List[str], **extra: str
    ) -> None:
        """Log the start of an LLM run."""

Collaborator: is Dict[str, Any] the best form? what goes in extra?

Owner (author): Not sure what the best type is for serialized objects; I was assuming they get serialized as Dict[str, Any]. extra is for any extra fields that you might want to store. Left it in there for added flexibility, but it's not used right now.

    @abstractmethod
    def log_llm_run_end(self, response: Dict[str, Any], error=None) -> None:
        """Log the end of an LLM run."""

Collaborator: practically, is error ever used currently?

Collaborator: not sure response is the best type; should it just be LLMResult?

Owner (author): error is not used right now, but in the future it would be good to log the error of a particular run if it didn't succeed. Agree on LLMResult.

    @abstractmethod
    def log_chain_run_start(
        self, serialized: Dict[str, Any], inputs: Dict[str, Any], **extra: str
    ) -> None:
        """Log the start of a chain run."""

    @abstractmethod
    def log_chain_run_end(self, outputs: Dict[str, Any], error=None) -> None:
        """Log the end of a chain run."""

    @abstractmethod
    def log_tool_run_start(
        self,
        serialized: Dict[str, Any],
        action: str,
        inputs: Dict[str, Any],
        **extra: str
    ) -> None:
        """Log the start of a tool run."""

Collaborator: right now, tool inputs/outputs are assumed to be a single string rather than multiple things. If we want to allow multiple here, we should change that in the code as well.

Owner (author): Makes sense, I'll change it to a single string.

    @abstractmethod
    def log_tool_run_end(self, outputs: Dict[str, Any], error=None) -> None:
        """Log the end of a tool run."""

    @abstractmethod
    def get_llm_runs(self, top_level_only: bool = False) -> List[LLMRun]:
        """Return all the LLM runs."""

    @abstractmethod
    def get_chain_runs(self, top_level_only: bool = False) -> List[ChainRun]:
        """Return all the chain runs."""

    @abstractmethod
    def get_tool_runs(self, top_level_only: bool = False) -> List[ToolRun]:
        """Return all the tool runs."""

    @abstractmethod
    def get_llm_run(self, run_id: int) -> LLMRun:
        """Return a specific LLM run."""

    @abstractmethod
    def get_chain_run(self, run_id: int) -> ChainRun:
        """Return a specific chain run."""

    @abstractmethod
    def get_tool_run(self, run_id: int) -> ToolRun:
        """Return a specific tool run."""
We also call generate further down if a cache exists but some things aren't in the cache; we'll want to put logging there as well.
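A very rough sketch of the spot described in that comment; the names below (cache, llm_generate) are invented, since the cached-generate code itself is not part of this diff.

from typing import Any, Callable, Dict, List


def generate_with_cache(
    prompts: List[str],
    cache: Dict[str, Any],
    llm_generate: Callable[[List[str]], Dict[str, Any]],
    logger: "BaseLogger",
    serialized: Dict[str, Any],
) -> Dict[str, Any]:
    """Illustration only: the fallback generate call for cache misses gets logged too."""
    results = {p: cache[p] for p in prompts if p in cache}
    missing = [p for p in prompts if p not in cache]
    if missing:
        logger.log_llm_run_start(serialized=serialized, prompts=missing)
        fresh = llm_generate(missing)
        logger.log_llm_run_end(response=fresh)
        results.update(fresh)
    return results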