
Commit 17dae60

Merge branch 'main' into improve_error_message_for_missing_dependencies

committed · 2 parents 4efb33a + 5736b96 · commit 17dae60

File tree: 88 files changed (+1091, -563 lines)


Diff for: .github/CODEOWNERS (-2)

@@ -10,10 +10,8 @@ doc/source/development @noatamir

 # pandas
 pandas/_libs/ @WillAyd
-pandas/_libs/tslibs/* @MarcoGorelli
 pandas/_typing.py @Dr-Irv
 pandas/core/groupby/* @rhshadrach
-pandas/core/tools/datetimes.py @MarcoGorelli
 pandas/io/excel/* @rhshadrach
 pandas/io/formats/style.py @attack68
 pandas/io/formats/style_render.py @attack68

Diff for: .github/workflows/wheels.yml (+1 -1)

@@ -153,7 +153,7 @@ jobs:
         run: echo "sdist_name=$(cd ./dist && ls -d */)" >> "$GITHUB_ENV"

       - name: Build wheels
-        uses: pypa/[email protected].0
+        uses: pypa/[email protected].2
         with:
           package-dir: ./dist/${{ startsWith(matrix.buildplat[1], 'macosx') && env.sdist_name || needs.build_sdist.outputs.sdist_file }}
         env:

Diff for: asv_bench/benchmarks/indexing_engines.py (+16)

@@ -67,6 +67,14 @@ class NumericEngineIndexing:
     def setup(self, engine_and_dtype, index_type, unique, N):
         engine, dtype = engine_and_dtype

+        if (
+            index_type == "non_monotonic"
+            and dtype in [np.int16, np.int8, np.uint8]
+            and unique
+        ):
+            # Values overflow
+            raise NotImplementedError
+
         if index_type == "monotonic_incr":
             if unique:
                 arr = np.arange(N * 3, dtype=dtype)
@@ -115,6 +123,14 @@ def setup(self, engine_and_dtype, index_type, unique, N):
         engine, dtype = engine_and_dtype
         dtype = dtype.lower()

+        if (
+            index_type == "non_monotonic"
+            and dtype in ["int16", "int8", "uint8"]
+            and unique
+        ):
+            # Values overflow
+            raise NotImplementedError
+
         if index_type == "monotonic_incr":
             if unique:
                 arr = np.arange(N * 3, dtype=dtype)
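The guard added in both setup methods exists because the benchmark builds its index with np.arange(N * 3, dtype=dtype), and for int8/int16/uint8 that range does not fit in the dtype, so a genuinely unique index cannot be constructed. A rough illustration of the wrap-around; the value of N here is an assumption for demonstration only, and astype is used so the overflow stays silent regardless of NumPy version:

    import numpy as np

    N = 2 * 10**5  # assumed size, for illustration only
    arr = np.arange(N * 3).astype(np.int16)  # int16 only covers -32768..32767
    print(arr.size, np.unique(arr).size)     # 600000 vs 65536: heavy duplication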

Diff for: ci/code_checks.sh (-4)

@@ -72,13 +72,9 @@ if [[ -z "$CHECK" || "$CHECK" == "docstrings" ]]; then
         -i "pandas.Series.dt PR01" `# Accessors are implemented as classes, but we do not document the Parameters section` \
         -i "pandas.Period.freq GL08" \
         -i "pandas.Period.ordinal GL08" \
-        -i "pandas.Timedelta.max PR02" \
-        -i "pandas.Timedelta.min PR02" \
-        -i "pandas.Timedelta.resolution PR02" \
         -i "pandas.Timestamp.max PR02" \
         -i "pandas.Timestamp.min PR02" \
         -i "pandas.Timestamp.resolution PR02" \
-        -i "pandas.Timestamp.tzinfo GL08" \
         -i "pandas.core.groupby.DataFrameGroupBy.plot PR02" \
         -i "pandas.core.groupby.SeriesGroupBy.plot PR02" \
         -i "pandas.core.resample.Resampler.quantile PR01,PR07" \

Diff for: ci/meta.yaml (-1)

@@ -89,4 +89,3 @@ extra:
     - datapythonista
     - phofl
     - lithomas1
-    - marcogorelli

Diff for: doc/source/development/contributing_codebase.rst (+1 -1)

@@ -537,7 +537,7 @@ Preferred ``pytest`` idioms
    test and does not check if the test will fail. If this is the behavior you desire, use ``pytest.skip`` instead.

    If a test is known to fail but the manner in which it fails
-   is not meant to be captured, use ``pytest.mark.xfail`` It is common to use this method for a test that
+   is not meant to be captured, use ``pytest.mark.xfail``. It is common to use this method for a test that
    exhibits buggy behavior or a non-implemented feature. If
    the failing test has flaky behavior, use the argument ``strict=False``. This
    will make it so pytest does not fail if the test happens to pass. Using ``strict=False`` is highly undesirable, please use it only as a last resort.
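The doc change above only adds a missing period, but the idiom it describes is easy to show. A minimal sketch of both forms (hypothetical tests, not taken from the pandas suite):

    import pytest
    import pandas as pd

    @pytest.mark.xfail(reason="known bug, see the tracking issue")
    def test_known_bug():
        ser = pd.Series([1, 2, 3])
        assert ser.sum() == 0  # deliberately wrong; stands in for buggy behavior

    @pytest.mark.xfail(reason="flaky on some platforms", strict=False)
    def test_flaky_case():
        # strict=False keeps pytest from failing the run if this happens to pass
        assert pd.Timestamp("2024-01-01").year == 2024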

Diff for: doc/source/development/debugging_extensions.rst (+1 -1)

@@ -23,7 +23,7 @@ By default building pandas from source will generate a release build. To generat

 .. note::

-   conda environments update CFLAGS/CPPFLAGS with flags that are geared towards generating releases. If using conda, you may need to set ``CFLAGS="$CFLAGS -O0"`` and ``CPPFLAGS="$CPPFLAGS -O0"`` to ensure optimizations are turned off for debugging
+   conda environments update CFLAGS/CPPFLAGS with flags that are geared towards generating releases, and may work counter towards usage in a development environment. If using conda, you should unset these environment variables via ``export CFLAGS=`` and ``export CPPFLAGS=``

 By specifying ``builddir="debug"`` all of the targets will be built and placed in the debug directory relative to the project root. This helps to keep your debug and release artifacts separate; you are of course able to choose a different directory name or omit altogether if you do not care to separate build types.

Diff for: doc/source/getting_started/comparison/comparison_with_r.rst (+2 -2)

@@ -383,7 +383,7 @@ In Python, since ``a`` is a list, you can simply use list comprehension.

 .. ipython:: python

-   a = np.array(list(range(1, 24)) + [np.NAN]).reshape(2, 3, 4)
+   a = np.array(list(range(1, 24)) + [np.nan]).reshape(2, 3, 4)
    pd.DataFrame([tuple(list(x) + [val]) for x, val in np.ndenumerate(a)])

 meltlist
@@ -402,7 +402,7 @@ In Python, this list would be a list of tuples, so

 .. ipython:: python

-   a = list(enumerate(list(range(1, 5)) + [np.NAN]))
+   a = list(enumerate(list(range(1, 5)) + [np.nan]))
    pd.DataFrame(a)

 For more details and examples see :ref:`the Intro to Data Structures

Diff for: doc/source/user_guide/basics.rst (+8 -8)

@@ -36,7 +36,7 @@ of elements to display is five, but you may pass a custom number.
 Attributes and underlying data
 ------------------------------

-pandas objects have a number of attributes enabling you to access the metadata
+pandas objects have a number of attributes enabling you to access the metadata.

 * **shape**: gives the axis dimensions of the object, consistent with ndarray
 * Axis labels
@@ -59,7 +59,7 @@ NumPy's type system to add support for custom arrays
 (see :ref:`basics.dtypes`).

 To get the actual data inside a :class:`Index` or :class:`Series`, use
-the ``.array`` property
+the ``.array`` property.

 .. ipython:: python

@@ -88,18 +88,18 @@ NumPy doesn't have a dtype to represent timezone-aware datetimes, so there
 are two possibly useful representations:

 1. An object-dtype :class:`numpy.ndarray` with :class:`Timestamp` objects, each
-   with the correct ``tz``
+   with the correct ``tz``.
 2. A ``datetime64[ns]`` -dtype :class:`numpy.ndarray`, where the values have
-   been converted to UTC and the timezone discarded
+   been converted to UTC and the timezone discarded.

-Timezones may be preserved with ``dtype=object``
+Timezones may be preserved with ``dtype=object``:

 .. ipython:: python

    ser = pd.Series(pd.date_range("2000", periods=2, tz="CET"))
    ser.to_numpy(dtype=object)

-Or thrown away with ``dtype='datetime64[ns]'``
+Or thrown away with ``dtype='datetime64[ns]'``:

 .. ipython:: python

@@ -2064,12 +2064,12 @@ different numeric dtypes will **NOT** be combined. The following example will gi

 .. ipython:: python

-   df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float32")
+   df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float64")
    df1
    df1.dtypes
    df2 = pd.DataFrame(
        {
-           "A": pd.Series(np.random.randn(8), dtype="float16"),
+           "A": pd.Series(np.random.randn(8), dtype="float32"),
            "B": pd.Series(np.random.randn(8)),
            "C": pd.Series(np.random.randint(0, 255, size=8), dtype="uint8"),  # [0,255] (range of uint8)
        }
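The last hunk swaps the example dtypes (float32/float16 become float64/float32). A short sketch of the point the surrounding section makes, namely that each column keeps its own numeric dtype while arithmetic between columns upcasts; the values are illustrative:

    import numpy as np
    import pandas as pd

    df1 = pd.DataFrame(np.random.randn(8, 1), columns=["A"], dtype="float64")
    df2 = pd.DataFrame({"A": pd.Series(np.random.randn(8), dtype="float32")})

    print(df1.dtypes["A"], df2.dtypes["A"])  # float64 float32: stored as-is
    print((df1["A"] + df2["A"]).dtype)       # float64: operations upcast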

Diff for: doc/source/user_guide/enhancingperf.rst (+2)

@@ -171,6 +171,7 @@ can be improved by passing an ``np.ndarray``.
    In [4]: %%cython
       ...: cimport numpy as np
       ...: import numpy as np
+      ...: np.import_array()
       ...: cdef double f_typed(double x) except? -2:
       ...:     return x * (x - 1)
       ...: cpdef double integrate_f_typed(double a, double b, int N):
@@ -225,6 +226,7 @@ and ``wraparound`` checks can yield more performance.
       ...: cimport cython
       ...: cimport numpy as np
       ...: import numpy as np
+      ...: np.import_array()
       ...: cdef np.float64_t f_typed(np.float64_t x) except? -2:
       ...:     return x * (x - 1)
       ...: cpdef np.float64_t integrate_f_typed(np.float64_t a, np.float64_t b, np.int64_t N):

Diff for: doc/source/whatsnew/v0.11.0.rst (+2 -2)

@@ -74,10 +74,10 @@ Numeric dtypes will propagate and can coexist in DataFrames. If a dtype is passe

 .. ipython:: python

-   df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float32')
+   df1 = pd.DataFrame(np.random.randn(8, 1), columns=['A'], dtype='float64')
    df1
    df1.dtypes
-   df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float16'),
+   df2 = pd.DataFrame({'A': pd.Series(np.random.randn(8), dtype='float32'),
                        'B': pd.Series(np.random.randn(8)),
                        'C': pd.Series(range(8), dtype='uint8')})
    df2

Diff for: doc/source/whatsnew/v3.0.0.rst (+11)

@@ -61,11 +61,13 @@ Other enhancements
 - :meth:`Series.cummin` and :meth:`Series.cummax` now supports :class:`CategoricalDtype` (:issue:`52335`)
 - :meth:`Series.plot` now correctly handle the ``ylabel`` parameter for pie charts, allowing for explicit control over the y-axis label (:issue:`58239`)
 - :meth:`DataFrame.plot.scatter` argument ``c`` now accepts a column of strings, where rows with the same string are colored identically (:issue:`16827` and :issue:`16485`)
+- :class:`ArrowDtype` now supports ``pyarrow.JsonType`` (:issue:`60958`)
 - :class:`DataFrameGroupBy` and :class:`SeriesGroupBy` methods ``sum``, ``mean``, ``median``, ``prod``, ``min``, ``max``, ``std``, ``var`` and ``sem`` now accept ``skipna`` parameter (:issue:`15675`)
 - :class:`Rolling` and :class:`Expanding` now support ``nunique`` (:issue:`26958`)
 - :class:`Rolling` and :class:`Expanding` now support aggregations ``first`` and ``last`` (:issue:`33155`)
 - :func:`read_parquet` accepts ``to_pandas_kwargs`` which are forwarded to :meth:`pyarrow.Table.to_pandas` which enables passing additional keywords to customize the conversion to pandas, such as ``maps_as_pydicts`` to read the Parquet map data type as python dictionaries (:issue:`56842`)
 - :meth:`.DataFrameGroupBy.transform`, :meth:`.SeriesGroupBy.transform`, :meth:`.DataFrameGroupBy.agg`, :meth:`.SeriesGroupBy.agg`, :meth:`.SeriesGroupBy.apply`, :meth:`.DataFrameGroupBy.apply` now support ``kurt`` (:issue:`40139`)
+- :meth:`DataFrame.apply` supports using third-party execution engines like the Bodo.ai JIT compiler (:issue:`60668`)
 - :meth:`DataFrameGroupBy.transform`, :meth:`SeriesGroupBy.transform`, :meth:`DataFrameGroupBy.agg`, :meth:`SeriesGroupBy.agg`, :meth:`RollingGroupby.apply`, :meth:`ExpandingGroupby.apply`, :meth:`Rolling.apply`, :meth:`Expanding.apply`, :meth:`DataFrame.apply` with ``engine="numba"`` now supports positional arguments passed as kwargs (:issue:`58995`)
 - :meth:`Rolling.agg`, :meth:`Expanding.agg` and :meth:`ExponentialMovingWindow.agg` now accept :class:`NamedAgg` aggregations through ``**kwargs`` (:issue:`28333`)
 - :meth:`Series.map` can now accept kwargs to pass on to func (:issue:`59814`)
@@ -634,6 +636,7 @@ Bug fixes
 Categorical
 ^^^^^^^^^^^
 - Bug in :func:`Series.apply` where ``nan`` was ignored for :class:`CategoricalDtype` (:issue:`59938`)
+- Bug in :meth:`Series.convert_dtypes` with ``dtype_backend="pyarrow"`` where empty :class:`CategoricalDtype` :class:`Series` raised an error or got converted to ``null[pyarrow]`` (:issue:`59934`)
 -

 Datetimelike
@@ -670,6 +673,7 @@ Timezones

 Numeric
 ^^^^^^^
+- Bug in :meth:`DataFrame.corr` where numerical precision errors resulted in correlations above ``1.0`` (:issue:`61120`)
 - Bug in :meth:`DataFrame.quantile` where the column type was not preserved when ``numeric_only=True`` with a list-like ``q`` produced an empty result (:issue:`59035`)
 - Bug in ``np.matmul`` with :class:`Index` inputs raising a ``TypeError`` (:issue:`57079`)

@@ -701,6 +705,7 @@ Indexing
 - Bug in :meth:`Index.get_indexer` and similar methods when ``NaN`` is located at or after position 128 (:issue:`58924`)
 - Bug in :meth:`MultiIndex.insert` when a new value inserted to a datetime-like level gets cast to ``NaT`` and fails indexing (:issue:`60388`)
 - Bug in printing :attr:`Index.names` and :attr:`MultiIndex.levels` would not escape single quotes (:issue:`60190`)
+- Bug in reindexing of :class:`DataFrame` with :class:`PeriodDtype` columns in case of consolidated block (:issue:`60980`, :issue:`60273`)

 Missing
 ^^^^^^^
@@ -737,6 +742,7 @@ I/O
 - Bug in :meth:`read_csv` where the order of the ``na_values`` makes an inconsistency when ``na_values`` is a list non-string values. (:issue:`59303`)
 - Bug in :meth:`read_excel` raising ``ValueError`` when passing array of boolean values when ``dtype="boolean"``. (:issue:`58159`)
 - Bug in :meth:`read_html` where ``rowspan`` in header row causes incorrect conversion to ``DataFrame``. (:issue:`60210`)
+- Bug in :meth:`read_json` ignoring the given ``dtype`` when ``engine="pyarrow"`` (:issue:`59516`)
 - Bug in :meth:`read_json` not validating the ``typ`` argument to not be exactly ``"frame"`` or ``"series"`` (:issue:`59124`)
 - Bug in :meth:`read_json` where extreme value integers in string format were incorrectly parsed as a different integer number (:issue:`20608`)
 - Bug in :meth:`read_stata` raising ``KeyError`` when input file is stored in big-endian format and contains strL data. (:issue:`58638`)
@@ -767,6 +773,7 @@ Groupby/resample/rolling
 - Bug in :meth:`.DataFrameGroupBy.quantile` when ``interpolation="nearest"`` is inconsistent with :meth:`DataFrame.quantile` (:issue:`47942`)
 - Bug in :meth:`.Resampler.interpolate` on a :class:`DataFrame` with non-uniform sampling and/or indices not aligning with the resulting resampled index would result in wrong interpolation (:issue:`21351`)
 - Bug in :meth:`DataFrame.ewm` and :meth:`Series.ewm` when passed ``times`` and aggregation functions other than mean (:issue:`51695`)
+- Bug in :meth:`DataFrame.resample` changing index type to :class:`MultiIndex` when the dataframe is empty and using an upsample method (:issue:`55572`)
 - Bug in :meth:`DataFrameGroupBy.agg` that raises ``AttributeError`` when there is dictionary input and duplicated columns, instead of returning a DataFrame with the aggregation of all duplicate columns. (:issue:`55041`)
 - Bug in :meth:`DataFrameGroupBy.apply` and :meth:`SeriesGroupBy.apply` for empty data frame with ``group_keys=False`` still creating output index using group keys. (:issue:`60471`)
 - Bug in :meth:`DataFrameGroupBy.apply` that was returning a completely empty DataFrame when all return values of ``func`` were ``None`` instead of returning an empty DataFrame with the original columns and dtypes. (:issue:`57775`)
@@ -818,6 +825,7 @@ Other
 - Bug in :class:`DataFrame` when passing a ``dict`` with a NA scalar and ``columns`` that would always return ``np.nan`` (:issue:`57205`)
 - Bug in :class:`Series` ignoring errors when trying to convert :class:`Series` input data to the given ``dtype`` (:issue:`60728`)
 - Bug in :func:`eval` on :class:`ExtensionArray` on including division ``/`` failed with a ``TypeError``. (:issue:`58748`)
+- Bug in :func:`eval` where method calls on binary operations like ``(x + y).dropna()`` would raise ``AttributeError: 'BinOp' object has no attribute 'value'`` (:issue:`61175`)
 - Bug in :func:`eval` where the names of the :class:`Series` were not preserved when using ``engine="numexpr"``. (:issue:`10239`)
 - Bug in :func:`eval` with ``engine="numexpr"`` returning unexpected result for float division. (:issue:`59736`)
 - Bug in :func:`to_numeric` raising ``TypeError`` when ``arg`` is a :class:`Timedelta` or :class:`Timestamp` scalar. (:issue:`59944`)
@@ -827,15 +835,18 @@ Other
 - Bug in :meth:`DataFrame.eval` and :meth:`DataFrame.query` which did not allow to use ``tan`` function. (:issue:`55091`)
 - Bug in :meth:`DataFrame.query` where using duplicate column names led to a ``TypeError``. (:issue:`59950`)
 - Bug in :meth:`DataFrame.query` which raised an exception or produced incorrect results when expressions contained backtick-quoted column names containing the hash character ``#``, backticks, or characters that fall outside the ASCII range (U+0001..U+007F). (:issue:`59285`) (:issue:`49633`)
+- Bug in :meth:`DataFrame.query` which raised an exception when querying integer column names using backticks. (:issue:`60494`)
 - Bug in :meth:`DataFrame.shift` where passing a ``freq`` on a DataFrame with no columns did not shift the index correctly. (:issue:`60102`)
 - Bug in :meth:`DataFrame.sort_index` when passing ``axis="columns"`` and ``ignore_index=True`` and ``ascending=False`` not returning a :class:`RangeIndex` columns (:issue:`57293`)
 - Bug in :meth:`DataFrame.transform` that was returning the wrong order unless the index was monotonically increasing. (:issue:`57069`)
 - Bug in :meth:`DataFrame.where` where using a non-bool type array in the function would return a ``ValueError`` instead of a ``TypeError`` (:issue:`56330`)
 - Bug in :meth:`Index.sort_values` when passing a key function that turns values into tuples, e.g. ``key=natsort.natsort_key``, would raise ``TypeError`` (:issue:`56081`)
 - Bug in :meth:`MultiIndex.fillna` error message was referring to ``isna`` instead of ``fillna`` (:issue:`60974`)
+- Bug in :meth:`Series.describe` where median percentile was always included when the ``percentiles`` argument was passed (:issue:`60550`).
 - Bug in :meth:`Series.diff` allowing non-integer values for the ``periods`` argument. (:issue:`56607`)
 - Bug in :meth:`Series.dt` methods in :class:`ArrowDtype` that were returning incorrect values. (:issue:`57355`)
 - Bug in :meth:`Series.isin` raising ``TypeError`` when series is large (>10**6) and ``values`` contains NA (:issue:`60678`)
+- Bug in :meth:`Series.mode` where an exception was raised when taking the mode with nullable types with no null values in the series. (:issue:`58926`)
 - Bug in :meth:`Series.rank` that doesn't preserve missing values for nullable integers when ``na_option='keep'``. (:issue:`56976`)
 - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` inconsistently replacing matching instances when ``regex=True`` and missing values are present. (:issue:`56599`)
 - Bug in :meth:`Series.replace` and :meth:`DataFrame.replace` throwing ``ValueError`` when ``regex=True`` and all NA values. (:issue:`60688`)

Diff for: environment.yml (+1 -1)

@@ -23,7 +23,7 @@ dependencies:

   # required dependencies
   - python-dateutil
-  - numpy<2
+  - numpy<3

   # optional dependencies
   - beautifulsoup4>=4.11.2

Diff for: pandas/_libs/algos.pyx (+9 -3)

@@ -353,10 +353,9 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
         float64_t[:, ::1] result
         uint8_t[:, :] mask
         int64_t nobs = 0
-        float64_t vx, vy, dx, dy, meanx, meany, divisor, ssqdmx, ssqdmy, covxy
+        float64_t vx, vy, dx, dy, meanx, meany, divisor, ssqdmx, ssqdmy, covxy, val

     N, K = (<object>mat).shape
-
     if minp is None:
         minpv = 1
     else:
@@ -389,8 +388,15 @@ def nancorr(const float64_t[:, :] mat, bint cov=False, minp=None):
                 else:
                     divisor = (nobs - 1.0) if cov else sqrt(ssqdmx * ssqdmy)

+                # clip `covxy / divisor` to ensure coeff is within bounds
                 if divisor != 0:
-                    result[xi, yi] = result[yi, xi] = covxy / divisor
+                    val = covxy / divisor
+                    if not cov:
+                        if val > 1.0:
+                            val = 1.0
+                        elif val < -1.0:
+                            val = -1.0
+                    result[xi, yi] = result[yi, xi] = val
                 else:
                     result[xi, yi] = result[yi, xi] = NaN
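In effect the change computes the coefficient as before and then clamps it into [-1.0, 1.0], but only when cov is false, so covariances are untouched; this keeps floating-point round-off from producing correlations slightly above 1.0 (the DataFrame.corr entry in the whatsnew hunk above). A rough NumPy-level equivalent of the clipping step; the helper name is made up for illustration:

    import numpy as np

    def corr_from_parts(covxy, ssqdmx, ssqdmy):
        # mirrors the clipping added in nancorr: round-off can push
        # covxy / divisor slightly outside [-1, 1], so clamp it back
        divisor = np.sqrt(ssqdmx * ssqdmy)
        if divisor == 0:
            return np.nan
        return float(np.clip(covxy / divisor, -1.0, 1.0))

    print(corr_from_parts(1.0000000000000002, 1.0, 1.0))  # 1.0 rather than > 1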

Diff for: pandas/_libs/hashtable_func_helper.pxi.in (+1 -1)

@@ -430,7 +430,7 @@ def mode(ndarray[htfunc_t] values, bint dropna, const uint8_t[:] mask=None):

     if na_counter > 0:
         res_mask = np.zeros(j+1, dtype=np.bool_)
-        res_mask[j] = True
+        res_mask[j] = (na_counter == max_count)
     return modes[:j + 1], res_mask

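With this change the NA slot in the returned mask is set only when the number of missing values actually ties the maximum count, instead of whenever any NA is present. A hedged sketch of the user-visible behavior this is aiming at, using a nullable integer Series; the expected outputs are my reading of the change, not output copied from the commit:

    import pandas as pd

    ser = pd.Series([1, 1, 2, pd.NA], dtype="Int64")
    # NA occurs once while 1 occurs twice, so NA should not be reported as a mode
    print(ser.mode(dropna=False))

    tied = pd.Series([1, 1, pd.NA, pd.NA], dtype="Int64")
    # NA's count ties the maximum, so it should appear alongside 1
    print(tied.mode(dropna=False))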

Diff for: pandas/_libs/lib.pyx (+2 -2)

@@ -1518,7 +1518,7 @@ cdef object _try_infer_map(object dtype):

 def infer_dtype(value: object, skipna: bool = True) -> str:
     """
-    Return a string label of the type of a scalar or list-like of values.
+    Return a string label of the type of the elements in a list-like input.

     This method inspects the elements of the provided input and determines
     classification of its data type. It is particularly useful for
@@ -1527,7 +1527,7 @@ def infer_dtype(value: object, skipna: bool = True) -> str:

     Parameters
     ----------
-    value : scalar, list, ndarray, or pandas type
+    value : list, ndarray, or pandas type
         The input data to infer the dtype.
     skipna : bool, default True
         Ignore NaN values when inferring the type.
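The docstring edits narrow the description to list-like inputs, which matches how the function is reached through pandas.api.types.infer_dtype. A brief usage sketch with the string labels I would expect it to return:

    import numpy as np
    from pandas.api.types import infer_dtype

    print(infer_dtype(["a", "b"]))                   # 'string'
    print(infer_dtype([1, 2, 3]))                    # 'integer'
    print(infer_dtype([1, 2, 3.5]))                  # 'mixed-integer-float'
    print(infer_dtype(["a", np.nan], skipna=True))   # 'string'
    print(infer_dtype(["a", np.nan], skipna=False))  # 'mixed'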
