diff --git a/README.md b/README.md
index 2df6c43..122716e 100644
--- a/README.md
+++ b/README.md
@@ -242,7 +242,7 @@ chat.chat("What is the capital of France?", echo="all")
 
 This shows important information like tool call results, finish reasons, and more.
 
-If the problem isn't self-evident, you can also reach into the `.last_turn()`, which contains the full response object, with full details about the completion.
+If the problem isn't self-evident, you can also reach into the result of `.get_last_turn()`, which contains the full response object, with complete details about the completion.
 
 
 <div style="display:flex;justify-content:center;">
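
To make that debugging flow concrete, here is a minimal sketch using `ChatGoogle` (the provider class imported in the tests below; any provider's `Chat` object exposes the same methods). It assumes credentials and a default model are picked up from the environment, so treat it as illustrative rather than a drop-in snippet:

```python
# Illustrative sketch only: assumes API credentials are configured in the
# environment. Any chatlas provider class works the same way.
from chatlas import ChatGoogle

chat = ChatGoogle()

# echo="all" prints tool call results, finish reasons, and other details.
chat.chat("What is the capital of France?", echo="all")

# If that isn't enough, reach into the last assistant turn directly.
turn = chat.get_last_turn()
if turn is not None:
    print(turn.finish_reason)
```
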
diff --git a/docs/reference/Chat.qmd b/docs/reference/Chat.qmd
index e7c4eea..013826e 100644
--- a/docs/reference/Chat.qmd
+++ b/docs/reference/Chat.qmd
@@ -32,7 +32,7 @@ You should generally not create this object yourself, but instead call
 | [console](#chatlas.Chat.console) | Enter a chat console to interact with the LLM. |
 | [extract_data](#chatlas.Chat.extract_data) | Extract structured data from the given input. |
 | [extract_data_async](#chatlas.Chat.extract_data_async) | Extract structured data from the given input asynchronously. |
-| [last_turn](#chatlas.Chat.last_turn) | Get the last turn in the chat with a specific role. |
+| [get_last_turn](#chatlas.Chat.get_last_turn) | Get the last turn in the chat with a specific role. |
 | [register_tool](#chatlas.Chat.register_tool) | Register a tool (function) with the chat. |
 | [set_turns](#chatlas.Chat.set_turns) | Set the turns of the chat. |
 | [tokens](#chatlas.Chat.tokens) | Get the tokens for each turn in the chat. |
@@ -158,7 +158,7 @@ Extract structured data from the given input asynchronously.
 |--------|-----------------------------------------------------|---------------------|
 |        | [dict](`dict`)\[[str](`str`), [Any](`typing.Any`)\] | The extracted data. |
 
-### last_turn { #chatlas.Chat.last_turn }
+### get_last_turn { #chatlas.Chat.get_last_turn }
 
 ```python
 Chat.get_last_turn(role='assistant')
diff --git a/tests/test_provider_anthropic.py b/tests/test_provider_anthropic.py
index 2d5a4ac..1a2a24b 100644
--- a/tests/test_provider_anthropic.py
+++ b/tests/test_provider_anthropic.py
@@ -37,7 +37,7 @@ async def test_anthropic_simple_streaming_request():
     async for x in foo:
         res.append(x)
     assert "2" in "".join(res)
-    turn = chat.last_turn()
+    turn = chat.get_last_turn()
     assert turn is not None
     assert turn.finish_reason == "end_turn"
 
diff --git a/tests/test_provider_google.py b/tests/test_provider_google.py
index eb7718d..dbf9bc5 100644
--- a/tests/test_provider_google.py
+++ b/tests/test_provider_google.py
@@ -2,7 +2,6 @@
 import time
 
 import pytest
-
 from chatlas import ChatGoogle
 
 from .conftest import (
@@ -41,7 +40,7 @@ async def test_google_simple_streaming_request():
     async for x in await chat.stream_async("What is 1 + 1?"):
         res.append(x)
     assert "2" in "".join(res)
-    turn = chat.last_turn()
+    turn = chat.get_last_turn()
     assert turn is not None
     assert turn.finish_reason == "STOP"
 
diff --git a/tests/test_provider_openai.py b/tests/test_provider_openai.py
index a921474..05c16a9 100644
--- a/tests/test_provider_openai.py
+++ b/tests/test_provider_openai.py
@@ -34,7 +34,7 @@ async def test_openai_simple_streaming_request():
     async for x in await chat.stream_async("What is 1 + 1?"):
         res.append(x)
     assert "2" in "".join(res)
-    turn = chat.last_turn()
+    turn = chat.get_last_turn()
     assert turn is not None
     assert turn.finish_reason == "stop"
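
The three provider tests above follow the same shape: stream a response, collect the chunks, then confirm the completed turn via `get_last_turn()`. A condensed sketch of that pattern, again assuming `ChatGoogle` picks up credentials from the environment; note that the `finish_reason` literal differs by provider ("end_turn" for Anthropic, "STOP" for Google, "stop" for OpenAI):

```python
import asyncio

from chatlas import ChatGoogle


async def main():
    chat = ChatGoogle()
    res = []
    # Collect the streamed chunks, then inspect the completed turn.
    async for chunk in await chat.stream_async("What is 1 + 1?"):
        res.append(chunk)
    assert "2" in "".join(res)

    turn = chat.get_last_turn()
    assert turn is not None
    # finish_reason is provider-specific, as the tests above show.
    print(turn.finish_reason)


asyncio.run(main())
```
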