20 changes: 17 additions & 3 deletions crawl4ai/async_crawler_strategy.py
@@ -1047,14 +1047,28 @@ async def get_delayed_content(delay: float = 5.0) -> str:
            raise e

        finally:
            # If no session_id is given we should close the page
            # Clean up page after crawl completes
            # For managed CDP browsers, close pages that are not part of a session to prevent memory leaks
            all_contexts = page.context.browser.contexts
            total_pages = sum(len(context.pages) for context in all_contexts)

            should_close_page = False

            if config.session_id:
                # Session pages are kept alive for reuse
                pass
            elif total_pages <= 1 and (self.browser_config.use_managed_browser or self.browser_config.headless):
            elif self.browser_config.use_managed_browser:
                # For managed browsers (CDP), close non-session pages to prevent tab accumulation
                # This is especially important for arun_many() with multiple concurrent crawls
                should_close_page = True
            elif total_pages <= 1 and self.browser_config.headless:
                # Keep the last page in headless mode to avoid closing the browser
                pass
            else:
                # For non-managed browsers, close the page
                should_close_page = True

            if should_close_page:
                # Detach listeners before closing to prevent potential errors during close
                if config.capture_network_requests:
                    page.remove_listener("request", handle_request_capture)
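
The cleanup branch above reduces to a small decision table: session pages are always kept for reuse, pages on a managed (CDP) browser are always closed so tabs do not accumulate across arun_many() runs, the last remaining page of a plain headless browser is kept so the browser itself stays alive, and everything else is closed. A minimal standalone sketch of that decision, assuming the branch order shown in the diff; the helper name should_close_page and the sample values are illustrative, not part of the crawl4ai API:

from typing import Optional

def should_close_page(
    session_id: Optional[str],
    use_managed_browser: bool,
    headless: bool,
    total_pages: int,
) -> bool:
    # Mirrors the branch order above: session pages survive, managed (CDP)
    # pages are closed to prevent tab accumulation, and the final page of a
    # plain headless browser is kept so the browser stays open.
    if session_id:
        return False  # session pages are reused across calls
    if use_managed_browser:
        return True   # CDP/managed: close every non-session page
    if total_pages <= 1 and headless:
        return False  # keep the last page so the headless browser stays alive
    return True       # default: close the page

# Spot-check the behaviour described in the diff comments:
assert should_close_page(None, use_managed_browser=True, headless=True, total_pages=40)
assert not should_close_page("crawl-session", use_managed_browser=True, headless=True, total_pages=40)
assert not should_close_page(None, use_managed_browser=False, headless=True, total_pages=1)
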
36 changes: 11 additions & 25 deletions crawl4ai/browser_manager.py
@@ -1035,34 +1035,20 @@ async def get_page(self, crawlerRunConfig: CrawlerRunConfig):
            self.sessions[crawlerRunConfig.session_id] = (context, page, time.time())
            return page, context

        # If using a managed browser, just grab the shared default_context
        # If using a managed browser, reuse the default context and create new pages
        if self.config.use_managed_browser:
            context = self.default_context
            if self.config.storage_state:
                context = await self.create_browser_context(crawlerRunConfig)
                ctx = self.default_context  # default context, one window only
                # Clone runtime state from storage to the shared context
                ctx = self.default_context
                ctx = await clone_runtime_state(context, ctx, crawlerRunConfig, self.config)
                # Avoid concurrent new_page on shared persistent context
                # See GH-1198: context.pages can be empty under races
                async with self._page_lock:
                    page = await ctx.new_page()
                await self._apply_stealth_to_page(page)
            else:
                context = self.default_context
                pages = context.pages
                page = next((p for p in pages if p.url == crawlerRunConfig.url), None)
                if not page:
                    if pages:
                        page = pages[0]
                    else:
                        # Double-check under lock to avoid TOCTOU and ensure only
                        # one task calls new_page when pages=[] concurrently
                        async with self._page_lock:
                            pages = context.pages
                            if pages:
                                page = pages[0]
                            else:
                                page = await context.new_page()
                await self._apply_stealth_to_page(page)

            # Always create a new page for concurrent safety
            # The page-level isolation prevents race conditions while sharing the same context
            async with self._page_lock:
                page = await context.new_page()

            await self._apply_stealth_to_page(page)
        else:
            # Otherwise, check if we have an existing context for this config
            config_signature = self._make_config_signature(crawlerRunConfig)
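
The new managed-browser path — one shared default context, a fresh page per crawl, and new_page() serialized behind an asyncio lock — can be sketched outside crawl4ai with plain Playwright. This is an illustrative sketch of the pattern under those assumptions, not library code; fetch_title, the lock, and the URLs are made up for the example:

import asyncio
from playwright.async_api import async_playwright

async def fetch_title(context, page_lock: asyncio.Lock, url: str) -> str:
    # Serialize only page creation: new_page() on a shared context is the racy
    # step; everything afterwards runs concurrently on an isolated page.
    async with page_lock:
        page = await context.new_page()
    try:
        await page.goto(url)
        return await page.title()
    finally:
        # Close the page when done so tabs do not pile up across many crawls.
        await page.close()

async def main():
    async with async_playwright() as p:
        browser = await p.chromium.launch(headless=True)
        context = await browser.new_context()  # one shared context for all tasks
        page_lock = asyncio.Lock()
        urls = ["https://example.com", "https://example.org"]
        titles = await asyncio.gather(*(fetch_title(context, page_lock, u) for u in urls))
        print(titles)
        await browser.close()

asyncio.run(main())

Page-level isolation is what makes the gather safe here: each task owns its own page, while the lock covers the only operation that mutates the shared context's page list.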