@@ -61,7 +61,7 @@ static void InitializeConsumers(py::class_<DuckDBPyRelation> &m) {
           py::arg("date_as_object") = false)
      .def("fetch_df_chunk", &DuckDBPyRelation::FetchDFChunk, "Execute and fetch a chunk of the rows",
           py::arg("vectors_per_chunk") = 1, py::kw_only(), py::arg("date_as_object") = false)
-     .def("arrow", &DuckDBPyRelation::ToArrowTable, "Execute and fetch all rows as an Arrow Table",
+     .def("arrow", &DuckDBPyRelation::ToRecordBatch, "Execute and return an Arrow Record Batch Reader that yields all rows",
           py::arg("batch_size") = 1000000)
      .def("fetch_arrow_table", &DuckDBPyRelation::ToArrowTable, "Execute and fetch all rows as an Arrow Table",
           py::arg("batch_size") = 1000000)
@@ -78,7 +78,7 @@ static void InitializeConsumers(py::class_<DuckDBPyRelation> &m) {
 )";
     m.def("__arrow_c_stream__", &DuckDBPyRelation::ToArrowCapsule, capsule_docs,
           py::arg("requested_schema") = py::none());
-    m.def("record_batch", &DuckDBPyRelation::ToRecordBatch,
+    m.def("fetch_record_batch", &DuckDBPyRelation::ToRecordBatch,
           "Execute and return an Arrow Record Batch Reader that yields all rows", py::arg("batch_size") = 1000000)
         .def("fetch_arrow_reader", &DuckDBPyRelation::ToRecordBatch,
              "Execute and return an Arrow Record Batch Reader that yields all rows", py::arg("batch_size") = 1000000);