From 9918c65d44b8441738ab100b47e629861f8ee7f8 Mon Sep 17 00:00:00 2001
From: skorani
Date: Sun, 4 Oct 2020 21:36:31 +0330
Subject: [PATCH 1/2] Fix visualization.rst

Signed-off-by: skorani
---
 doc/source/user_guide/visualization.rst | 390 ++++++++++++------------
 1 file changed, 195 insertions(+), 195 deletions(-)

diff --git a/doc/source/user_guide/visualization.rst b/doc/source/user_guide/visualization.rst
index f41912445455d..2fa05f43ba357 100644
--- a/doc/source/user_guide/visualization.rst
+++ b/doc/source/user_guide/visualization.rst
@@ -11,7 +11,7 @@ We use the standard convention for referencing the matplotlib API:
 .. ipython:: python

    import matplotlib.pyplot as plt
-   plt.close('all')
+   plt.close("all")

 We provide the basics in pandas to easily create decent looking plots.
 See the :ref:`ecosystem ` section for visualization
@@ -40,7 +40,7 @@ The ``plot`` method on Series and DataFrame is just a simple wrapper around
 .. ipython:: python

    ts = pd.Series(np.random.randn(1000),
-                  index=pd.date_range('1/1/2000', periods=1000))
+                  index=pd.date_range("1/1/2000", periods=1000))

    ts = ts.cumsum()

    @savefig series_plot_basic.png
@@ -54,13 +54,13 @@ On DataFrame, :meth:`~DataFrame.plot` is a convenience to plot all of the column
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    np.random.seed(123456)

 .. ipython:: python

    df = pd.DataFrame(np.random.randn(1000, 4),
-                     index=ts.index, columns=list('ABCD'))
+                     index=ts.index, columns=list("ABCD"))

    df = df.cumsum()

    plt.figure();
@@ -73,17 +73,17 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()
    np.random.seed(123456)

 .. ipython:: python

-   df3 = pd.DataFrame(np.random.randn(1000, 2), columns=['B', 'C']).cumsum()
-   df3['A'] = pd.Series(list(range(len(df))))
+   df3 = pd.DataFrame(np.random.randn(1000, 2), columns=["B", "C"]).cumsum()
+   df3["A"] = pd.Series(list(range(len(df))))

    @savefig df_plot_xy.png
-   df3.plot(x='A', y='B')
+   df3.plot(x="A", y="B")

 .. note::

@@ -93,7 +93,7 @@ You can plot one column versus another using the ``x`` and ``y`` keywords in
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 .. _visualization.other:

@@ -104,14 +104,14 @@ Plotting methods allow for a handful of plot styles other than the
 default line plot. These methods can be provided as the ``kind``
 keyword argument to :meth:`~DataFrame.plot`, and include:

-* :ref:`'bar' ` or :ref:`'barh' ` for bar plots
-* :ref:`'hist' ` for histogram
-* :ref:`'box' ` for boxplot
-* :ref:`'kde' ` or :ref:`'density' ` for density plots
-* :ref:`'area' ` for area plots
-* :ref:`'scatter' ` for scatter plots
-* :ref:`'hexbin' ` for hexagonal bin plots
-* :ref:`'pie' ` for pie plots
+* :ref:`"bar" ` or :ref:`"barh" ` for bar plots
+* :ref:`"hist" ` for histogram
+* :ref:`"box" ` for boxplot
+* :ref:`"kde" ` or :ref:`"density" ` for density plots
+* :ref:`"area" ` for area plots
+* :ref:`"scatter" ` for scatter plots
+* :ref:`"hexbin" ` for hexagonal bin plots
+* :ref:`"pie" ` for pie plots

 For example, a bar plot can be created the following way:

@@ -120,7 +120,7 @@ For example, a bar plot can be created the following way:
    plt.figure();

    @savefig bar_plot_ex.png
-   df.iloc[5].plot(kind='bar');
+   df.iloc[5].plot(kind="bar");

 You can also create these other plots using the methods ``DataFrame.plot.<kind>`` instead of providing the ``kind``
 keyword argument.
 This makes it easier to discover plot methods and the specific arguments they use:
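As a minimal sketch of that equivalence (a small random ``DataFrame`` is assumed, and the ``Agg`` backend is selected so the snippet runs headless):

.. code-block:: python

   import matplotlib

   matplotlib.use("Agg")  # assumption: render off-screen, no display needed
   import matplotlib.pyplot as plt
   import numpy as np
   import pandas as pd

   df = pd.DataFrame(np.random.randn(10, 4), columns=list("ABCD"))
   # these two calls draw the same bar chart
   df.iloc[5].plot(kind="bar")
   df.iloc[5].plot.bar()
   plt.close("all")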
@@ -164,21 +164,21 @@ For labeled, non-time series data, you may wish to produce a bar plot:

    @savefig bar_plot_ex.png
    df.iloc[5].plot.bar()
-   plt.axhline(0, color='k');
+   plt.axhline(0, color="k");

 Calling a DataFrame's :meth:`plot.bar() ` method produces a multiple
 bar plot:

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()
    np.random.seed(123456)

 .. ipython:: python

-   df2 = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+   df2 = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])

    @savefig bar_plot_multi_ex.png
    df2.plot.bar();
@@ -188,7 +188,7 @@ To produce a stacked bar plot, pass ``stacked=True``:
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()

 .. ipython:: python
@@ -201,7 +201,7 @@ To get horizontal bar plots, use the ``barh`` method:
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()

 .. ipython:: python
@@ -218,8 +218,8 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri

 .. ipython:: python

-   df4 = pd.DataFrame({'a': np.random.randn(1000) + 1, 'b': np.random.randn(1000),
-                       'c': np.random.randn(1000) - 1}, columns=['a', 'b', 'c'])
+   df4 = pd.DataFrame({"a": np.random.randn(1000) + 1, "b": np.random.randn(1000),
+                       "c": np.random.randn(1000) - 1}, columns=["a", "b", "c"])

    plt.figure();

@@ -230,7 +230,7 @@ Histograms can be drawn by using the :meth:`DataFrame.plot.hist` and :meth:`Seri
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 A histogram can be stacked using ``stacked=True``. Bin size can be changed
 using the ``bins`` keyword.

@@ -245,23 +245,23 @@ using the ``bins`` keyword.
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 You can pass other keywords supported by matplotlib ``hist``. For example,
 horizontal and cumulative histograms can be drawn by
-``orientation='horizontal'`` and ``cumulative=True``.
+``orientation="horizontal"`` and ``cumulative=True``.

 .. ipython:: python

    plt.figure();

    @savefig hist_new_kwargs.png
-   df4['a'].plot.hist(orientation='horizontal', cumulative=True)
+   df4["a"].plot.hist(orientation="horizontal", cumulative=True)

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 See the :meth:`hist ` method and the
 `matplotlib hist documentation `__ for more.

@@ -274,12 +274,12 @@ The existing interface ``DataFrame.hist`` to plot histogram still can be used.

    plt.figure();

    @savefig hist_plot_ex.png
-   df['A'].diff().hist()
+   df["A"].diff().hist()

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 :meth:`DataFrame.hist` plots the histograms of the columns on multiple
 subplots:

@@ -289,7 +289,7 @@ subplots:

    plt.figure()

    @savefig frame_hist_ex.png
-   df.diff().hist(color='k', alpha=0.5, bins=50)
+   df.diff().hist(color="k", alpha=0.5, bins=50)

 The ``by`` keyword can be specified to plot grouped histograms:

@@ -297,7 +297,7 @@ The ``by`` keyword can be specified to plot grouped histograms:
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()
    np.random.seed(123456)

@@ -323,12 +323,12 @@ a uniform random variable on [0,1).
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    np.random.seed(123456)
 .. ipython:: python

-   df = pd.DataFrame(np.random.rand(10, 5), columns=['A', 'B', 'C', 'D', 'E'])
+   df = pd.DataFrame(np.random.rand(10, 5), columns=["A", "B", "C", "D", "E"])

    @savefig box_plot_new.png
    df.plot.box()
@@ -348,16 +348,16 @@ more complicated colorization, you can get each drawn artists by passing

 .. ipython:: python

-   color = {'boxes': 'DarkGreen', 'whiskers': 'DarkOrange',
-            'medians': 'DarkBlue', 'caps': 'Gray'}
+   color = {"boxes": "DarkGreen", "whiskers": "DarkOrange",
+            "medians": "DarkBlue", "caps": "Gray"}

    @savefig box_new_colorize.png
-   df.plot.box(color=color, sym='r+')
+   df.plot.box(color=color, sym="r+")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Also, you can pass other keywords supported by matplotlib ``boxplot``.
 For example, horizontal and custom-positioned boxplot can be drawn by

@@ -378,7 +378,7 @@ The existing interface ``DataFrame.boxplot`` to plot boxplot still can be used.
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    np.random.seed(123456)

 .. ipython:: python
@@ -396,19 +396,19 @@ groupings. For instance,

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    np.random.seed(123456)

 .. ipython:: python
    :okwarning:

-   df = pd.DataFrame(np.random.rand(10, 2), columns=['Col1', 'Col2'])
-   df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
+   df = pd.DataFrame(np.random.rand(10, 2), columns=["Col1", "Col2"])
+   df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])

    plt.figure();

    @savefig box_plot_ex2.png
-   bp = df.boxplot(by='X')
+   bp = df.boxplot(by="X")

 You can also pass a subset of columns to plot, as well as group by multiple
 columns:

@@ -416,25 +416,25 @@ columns:
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    np.random.seed(123456)

 .. ipython:: python
    :okwarning:

-   df = pd.DataFrame(np.random.rand(10, 3), columns=['Col1', 'Col2', 'Col3'])
-   df['X'] = pd.Series(['A', 'A', 'A', 'A', 'A', 'B', 'B', 'B', 'B', 'B'])
-   df['Y'] = pd.Series(['A', 'B', 'A', 'B', 'A', 'B', 'A', 'B', 'A', 'B'])
+   df = pd.DataFrame(np.random.rand(10, 3), columns=["Col1", "Col2", "Col3"])
+   df["X"] = pd.Series(["A", "A", "A", "A", "A", "B", "B", "B", "B", "B"])
+   df["Y"] = pd.Series(["A", "B", "A", "B", "A", "B", "A", "B", "A", "B"])

    plt.figure();

    @savefig box_plot_ex3.png
-   bp = df.boxplot(column=['Col1', 'Col2'], by=['X', 'Y'])
+   bp = df.boxplot(column=["Col1", "Col2"], by=["X", "Y"])

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 .. _visualization.box.return:

@@ -447,12 +447,12 @@ keyword, will affect the output type as well:

 ================ ======= ==========================
 ``None``         No      axes
 ``None``         Yes     2-D ndarray of axes
-``'axes'``       No      axes
-``'axes'``       Yes     Series of axes
-``'dict'``       No      dict of artists
-``'dict'``       Yes     Series of dicts of artists
-``'both'``       No      namedtuple
-``'both'``       Yes     Series of namedtuples
+``"axes"``       No      axes
+``"axes"``       Yes     Series of axes
+``"dict"``       No      dict of artists
+``"dict"``       Yes     Series of dicts of artists
+``"both"``       No      namedtuple
+``"both"``       Yes     Series of namedtuples
 ================ ======= ==========================

 ``Groupby.boxplot`` always returns a ``Series`` of ``return_type``.
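A small sketch of the table above; the ``return_type`` values are the documented ones, while the frame itself is a toy:

.. code-block:: python

   import matplotlib

   matplotlib.use("Agg")  # assumption: headless rendering
   import numpy as np
   import pandas as pd

   df = pd.DataFrame(np.random.randn(10, 2), columns=["Col1", "Col2"])
   ax = df.boxplot(return_type="axes")       # matplotlib Axes (the default)
   artists = df.boxplot(return_type="dict")  # dict of the drawn artists
   both = df.boxplot(return_type="both")     # namedtuple with ax and lines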
@@ -462,16 +462,16 @@ keyword, will affect the output type as well:

    np.random.seed(1234)
    df_box = pd.DataFrame(np.random.randn(50, 2))
-   df_box['g'] = np.random.choice(['A', 'B'], size=50)
-   df_box.loc[df_box['g'] == 'B', 1] += 3
+   df_box["g"] = np.random.choice(["A", "B"], size=50)
+   df_box.loc[df_box["g"] == "B", 1] += 3

    @savefig boxplot_groupby.png
-   bp = df_box.boxplot(by='g')
+   bp = df_box.boxplot(by="g")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 The subplots above are split by the numeric columns first, then the value of
 the ``g`` column. Below the subplots are first split by the value of ``g``,
 then by the numeric columns.

@@ -481,12 +481,12 @@ then by the numeric columns.
    :okwarning:

    @savefig groupby_boxplot_vis.png
-   bp = df_box.groupby('g').boxplot()
+   bp = df_box.groupby("g").boxplot()

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 .. _visualization.area_plot:

@@ -506,7 +506,7 @@ When input data contains ``NaN``, it will be automatically filled by 0. If you w

 .. ipython:: python

-   df = pd.DataFrame(np.random.rand(10, 4), columns=['a', 'b', 'c', 'd'])
+   df = pd.DataFrame(np.random.rand(10, 4), columns=["a", "b", "c", "d"])

    @savefig area_plot_stacked.png
    df.plot.area();
@@ -516,7 +516,7 @@ To produce an unstacked plot, pass ``stacked=False``. Alpha value is set to 0.5

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()

 .. ipython:: python
@@ -537,29 +537,29 @@ These can be specified by the ``x`` and ``y`` keywords.
    :suppress:

    np.random.seed(123456)
-   plt.close('all')
+   plt.close("all")
    plt.figure()

 .. ipython:: python

-   df = pd.DataFrame(np.random.rand(50, 4), columns=['a', 'b', 'c', 'd'])
+   df = pd.DataFrame(np.random.rand(50, 4), columns=["a", "b", "c", "d"])

    @savefig scatter_plot.png
-   df.plot.scatter(x='a', y='b');
+   df.plot.scatter(x="a", y="b");

 To plot multiple column groups in a single axes, repeat ``plot`` method specifying target ``ax``.
 It is recommended to specify ``color`` and ``label`` keywords to distinguish each groups.

 .. ipython:: python

-   ax = df.plot.scatter(x='a', y='b', color='DarkBlue', label='Group 1');
+   ax = df.plot.scatter(x="a", y="b", color="DarkBlue", label="Group 1");
    @savefig scatter_plot_repeated.png
-   df.plot.scatter(x='c', y='d', color='DarkGreen', label='Group 2', ax=ax);
+   df.plot.scatter(x="c", y="d", color="DarkGreen", label="Group 2", ax=ax);

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 The keyword ``c`` may be given as the name of a column to provide colors for
 each point:

 .. ipython:: python

    @savefig scatter_plot_colored.png
-   df.plot.scatter(x='a', y='b', c='c', s=50);
+   df.plot.scatter(x="a", y="b", c="c", s=50);

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 You can pass other keywords supported by matplotlib
 :meth:`scatter `. The example below shows a
 bubble chart using a column of the ``DataFrame`` as the bubble size.

 .. ipython:: python

    @savefig scatter_plot_bubble.png
-   df.plot.scatter(x='a', y='b', s=df['c'] * 200);
+   df.plot.scatter(x="a", y="b", s=df["c"] * 200);

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 See the :meth:`scatter ` method and the
 `matplotlib scatter documentation `__ for more.

@@ -609,11 +609,11 @@ too dense to plot each point individually.
 .. ipython:: python

-   df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
-   df['b'] = df['b'] + np.arange(1000)
+   df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+   df["b"] = df["b"] + np.arange(1000)

    @savefig hexbin_plot.png
-   df.plot.hexbin(x='a', y='b', gridsize=25)
+   df.plot.hexbin(x="a", y="b", gridsize=25)

 A useful keyword argument is ``gridsize``; it controls the number of hexagons
@@ -626,28 +626,28 @@ You can specify alternative aggregations by passing values to the ``C`` and
 and ``reduce_C_function`` is a function of one argument that reduces all the
 values in a bin to a single number (e.g. ``mean``, ``max``, ``sum``, ``std``). In this
 example the positions are given by columns ``a`` and ``b``, while the value is
 given by column ``z``. The bins are aggregated with NumPy's ``max`` function.

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()
    np.random.seed(123456)

 .. ipython:: python

-   df = pd.DataFrame(np.random.randn(1000, 2), columns=['a', 'b'])
-   df['b'] = df['b'] = df['b'] + np.arange(1000)
-   df['z'] = np.random.uniform(0, 3, 1000)
+   df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
+   df["b"] = df["b"] + np.arange(1000)
+   df["z"] = np.random.uniform(0, 3, 1000)

    @savefig hexbin_plot_agg.png
-   df.plot.hexbin(x='a', y='b', C='z', reduce_C_function=np.max, gridsize=25)
+   df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25)

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 See the :meth:`hexbin ` method and the
 `matplotlib hexbin documentation `__ for more.
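A self-contained sketch of the aggregated hexbin above (toy data assumed):

.. code-block:: python

   import matplotlib

   matplotlib.use("Agg")  # assumption: headless rendering
   import matplotlib.pyplot as plt
   import numpy as np
   import pandas as pd

   df = pd.DataFrame(np.random.randn(1000, 2), columns=["a", "b"])
   df["b"] = df["b"] + np.arange(1000)
   df["z"] = np.random.uniform(0, 3, 1000)
   # each hexagon shows the max of "z" over the points that fall in it
   df.plot.hexbin(x="a", y="b", C="z", reduce_C_function=np.max, gridsize=25)
   plt.close("all")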
@@ -671,7 +671,7 @@ A ``ValueError`` will be raised if there are any negative values in your data.
    :okwarning:

    series = pd.Series(3 * np.random.rand(4),
-                      index=['a', 'b', 'c', 'd'], name='series')
+                      index=["a", "b", "c", "d"], name="series")

    @savefig series_pie_plot.png
    series.plot.pie(figsize=(6, 6))

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 For pie plots it's best to use square figures, i.e. a figure aspect ratio 1.
 You can create the figure with equal width and height, or force the aspect ratio
-to be equal after plotting by calling ``ax.set_aspect('equal')`` on the returned
+to be equal after plotting by calling ``ax.set_aspect("equal")`` on the returned
 ``axes`` object.

 Note that pie plot with :class:`DataFrame` requires that you either specify a
@@ -701,7 +701,7 @@ drawn in each pie plots by default; specify ``legend=False`` to hide it.

 .. ipython:: python

    df = pd.DataFrame(3 * np.random.rand(4, 2),
-                     index=['a', 'b', 'c', 'd'], columns=['x', 'y'])
+                     index=["a", "b", "c", "d"], columns=["x", "y"])

    @savefig df_pie_plot.png
    df.plot.pie(subplots=True, figsize=(8, 4))

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 You can use the ``labels`` and ``colors`` keywords to specify the labels and colors of each wedge.

@@ -731,21 +731,21 @@ Also, other keywords supported by :func:`matplotlib.pyplot.pie` can be used.

 .. ipython:: python

    @savefig series_pie_plot_options.png
-   series.plot.pie(labels=['AA', 'BB', 'CC', 'DD'], colors=['r', 'g', 'b', 'c'],
-                   autopct='%.2f', fontsize=20, figsize=(6, 6))
+   series.plot.pie(labels=["AA", "BB", "CC", "DD"], colors=["r", "g", "b", "c"],
+                   autopct="%.2f", fontsize=20, figsize=(6, 6))

 If you pass values whose sum total is less than 1.0, matplotlib draws a semicircle.

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")
    plt.figure()

 .. ipython:: python
    :okwarning:

-   series = pd.Series([0.1] * 4, index=['a', 'b', 'c', 'd'], name='series2')
+   series = pd.Series([0.1] * 4, index=["a", "b", "c", "d"], name="series2")

    @savefig series_pie_plot_semi.png
    series.plot.pie(figsize=(6, 6))

 See the `matplotlib pie documentation `__ for more.

@@ -1545,7 +1545,7 @@ colors are selected based on an even spacing determined by the number of columns
 in the ``DataFrame``. There is no consideration made for background color, so some
 colormaps will produce lines that are not easily visible.

-To use the cubehelix colormap, we can pass ``colormap='cubehelix'``.
+To use the cubehelix colormap, we can pass ``colormap="cubehelix"``.

 .. ipython:: python
    :suppress:

@@ -1560,12 +1560,12 @@ To use the cubehelix colormap, we can pass ``colormap="cubehelix"``.

    plt.figure()

    @savefig cubehelix.png
-   df.plot(colormap='cubehelix')
+   df.plot(colormap="cubehelix")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Alternatively, we can pass the colormap itself:

@@ -1581,7 +1581,7 @@ Alternatively, we can pass the colormap itself:
 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Colormaps can also be used other plot types, like bar charts:

@@ -1598,12 +1598,12 @@ Colormaps can also be used other plot types, like bar charts:

    plt.figure()

    @savefig greens.png
-   dd.plot.bar(colormap='Greens')
+   dd.plot.bar(colormap="Greens")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Parallel coordinates charts:

@@ -1612,12 +1612,12 @@ Parallel coordinates charts:

    plt.figure()

    @savefig parallel_gist_rainbow.png
-   parallel_coordinates(data, 'Name', colormap='gist_rainbow')
+   parallel_coordinates(data, "Name", colormap="gist_rainbow")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Andrews curves charts:

@@ -1626,12 +1626,12 @@ Andrews curves charts:

    plt.figure()

    @savefig andrews_curve_winter.png
-   andrews_curves(data, 'Name', colormap='winter')
+   andrews_curves(data, "Name", colormap="winter")

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Plotting directly with matplotlib
 ---------------------------------

@@ -1656,22 +1656,22 @@ when plotting a large number of points.

 .. ipython:: python

    price = pd.Series(np.random.randn(150).cumsum(),
-                     index=pd.date_range('2000-1-1', periods=150, freq='B'))
+                     index=pd.date_range("2000-1-1", periods=150, freq="B"))
    ma = price.rolling(20).mean()
    mstd = price.rolling(20).std()

    plt.figure()

-   plt.plot(price.index, price, 'k')
-   plt.plot(ma.index, ma, 'b')
+   plt.plot(price.index, price, "k")
+   plt.plot(ma.index, ma, "b")
    @savefig bollinger.png
    plt.fill_between(mstd.index, ma - 2 * mstd, ma + 2 * mstd,
-                    color='b', alpha=0.2)
+                    color="b", alpha=0.2)

 .. ipython:: python
    :suppress:

-   plt.close('all')
+   plt.close("all")

 Plotting backends
 -----------------

@@ -1680,26 +1680,26 @@ Starting in version 0.25, pandas can be extended with third-party plotting backe
 main idea is letting users select a plotting backend different than the provided
 one based on Matplotlib.
-This can be done by passsing 'backend.module' as the argument ``backend`` in ``plot``
+This can be done by passing "backend.module" as the argument ``backend`` in ``plot``
 function. For example:

 .. code-block:: python

-    >>> Series([1, 2, 3]).plot(backend='backend.module')
+    >>> Series([1, 2, 3]).plot(backend="backend.module")

-Alternatively, you can also set this option globally, do you don't need to specify
+Alternatively, you can also set this option globally, so you don't need to specify
 the keyword in each ``plot`` call. For example:

 .. code-block:: python

-    >>> pd.set_option('plotting.backend', 'backend.module')
+    >>> pd.set_option("plotting.backend", "backend.module")
     >>> pd.Series([1, 2, 3]).plot()

 Or:

 .. code-block:: python

-    >>> pd.options.plotting.backend = 'backend.module'
+    >>> pd.options.plotting.backend = "backend.module"
     >>> pd.Series([1, 2, 3]).plot()

 This would be more or less equivalent to:

From d6f94454ff3894c8d761216cb901e876a4a2f4ac Mon Sep 17 00:00:00 2001
From: skorani
Date: Mon, 5 Oct 2020 11:24:54 +0330
Subject: [PATCH 2/2] Fixed missing_data.rst, timedeltas.rst and timeseries.rst

---
 doc/source/user_guide/missing_data.rst | 160 ++--
 doc/source/user_guide/timedeltas.rst   |   4 +-
 doc/source/user_guide/timeseries.rst   | 802 ++++++++++-----------
 3 files changed, 483 insertions(+), 483 deletions(-)

diff --git a/doc/source/user_guide/missing_data.rst b/doc/source/user_guide/missing_data.rst
index 9294897686d46..ef258ac0ecd79 100644
--- a/doc/source/user_guide/missing_data.rst
+++ b/doc/source/user_guide/missing_data.rst
@@ -38,12 +38,12 @@ arise and we wish to also consider that "missing" or "not available" or "NA".

 .. ipython:: python

-   df = pd.DataFrame(np.random.randn(5, 3), index=['a', 'c', 'e', 'f', 'h'],
-                     columns=['one', 'two', 'three'])
-   df['four'] = 'bar'
-   df['five'] = df['one'] > 0
+   df = pd.DataFrame(np.random.randn(5, 3), index=["a", "c", "e", "f", "h"],
+                     columns=["one", "two", "three"])
+   df["four"] = "bar"
+   df["five"] = df["one"] > 0
    df
-   df2 = df.reindex(['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h'])
+   df2 = df.reindex(["a", "b", "c", "d", "e", "f", "g", "h"])
    df2

 To make detecting missing values easier (and across different array dtypes),
@@ -53,14 +53,14 @@ Series and DataFrame objects:

 .. ipython:: python

-   df2['one']
-   pd.isna(df2['one'])
-   df2['four'].notna()
+   df2["one"]
+   pd.isna(df2["one"])
+   df2["four"].notna()
    df2.isna()

 .. warning::

    One has to be mindful that in Python (and NumPy), the ``nan's`` don't compare equal, but ``None's`` **do**.
    Note that pandas/NumPy uses the fact that ``np.nan != np.nan``, and treats ``None`` like ``np.nan``.

    .. ipython:: python

       None == None  # noqa: E711
       np.nan == np.nan

    So as compared to above, a scalar equality comparison versus a ``None/np.nan`` doesn't provide useful information.
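A minimal sketch of why equality checks cannot detect missing values, with toy values:

.. code-block:: python

   import numpy as np
   import pandas as pd

   s = pd.Series([1.0, np.nan, None])  # None is coerced to NaN in a float Series
   print(s == np.nan)  # all False: NaN never compares equal, even to itself
   print(pd.isna(s))   # [False, True, True]: use isna/notna instead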
    .. ipython:: python

-      df2['one'] == np.nan
+      df2["one"] == np.nan

 Integer dtypes and missing data
 -------------------------------

@@ -86,7 +86,7 @@ the dtype:

    pd.Series([1, 2, np.nan, 4], dtype=pd.Int64Dtype())

-Alternatively, the string alias ``dtype='Int64'`` (note the capital ``"I"``) can be
+Alternatively, the string alias ``dtype="Int64"`` (note the capital ``"I"``) can be
 used.

 See :ref:`integer_na` for more.

@@ -101,9 +101,9 @@ pandas objects provide compatibility between ``NaT`` and ``NaN``.

 .. ipython:: python

    df2 = df.copy()
-   df2['timestamp'] = pd.Timestamp('20120101')
+   df2["timestamp"] = pd.Timestamp("20120101")
    df2
-   df2.loc[['a', 'c', 'h'], ['one', 'timestamp']] = np.nan
+   df2.loc[["a", "c", "h"], ["one", "timestamp"]] = np.nan
    df2
    df2.dtypes.value_counts()

@@ -146,9 +146,9 @@ objects.

 .. ipython:: python
    :suppress:

-   df = df2.loc[:, ['one', 'two', 'three']]
-   a = df2.loc[df2.index[:5], ['one', 'two']].fillna(method='pad')
-   b = df2.loc[df2.index[:5], ['one', 'two', 'three']]
+   df = df2.loc[:, ["one", "two", "three"]]
+   a = df2.loc[df2.index[:5], ["one", "two"]].fillna(method="pad")
+   b = df2.loc[df2.index[:5], ["one", "two", "three"]]

 .. ipython:: python

@@ -168,7 +168,7 @@ account for missing data. For example:

 .. ipython:: python

    df
-   df['one'].sum()
+   df["one"].sum()
    df.mean(1)
    df.cumsum()
    df.cumsum(skipna=False)

@@ -210,7 +210,7 @@ with R, for example:

 .. ipython:: python

    df
-   df.groupby('one').mean()
+   df.groupby("one").mean()

 See the groupby section :ref:`here ` for more information.

@@ -234,7 +234,7 @@ of ways, which we illustrate:

    df2
    df2.fillna(0)
-   df2['one'].fillna('missing')
+   df2["one"].fillna("missing")

 **Fill gaps forward or backward**

@@ -244,7 +244,7 @@ can propagate non-NA values forward or backward:

 .. ipython:: python

    df
-   df.fillna(method='pad')
+   df.fillna(method="pad")

 .. _missing_data.fillna.limit:

@@ -261,7 +261,7 @@ we can use the ``limit`` keyword:

 .. ipython:: python

    df
-   df.fillna(method='pad', limit=1)
+   df.fillna(method="pad", limit=1)

 To remind you, these are the available filling methods:

@@ -275,8 +275,8 @@ To remind you, these are the available filling methods:

 With time series data, using pad/ffill is extremely common so that the "last
 known value" is available at every time point.

-:meth:`~DataFrame.ffill` is equivalent to ``fillna(method='ffill')``
-and :meth:`~DataFrame.bfill` is equivalent to ``fillna(method='bfill')``
+:meth:`~DataFrame.ffill` is equivalent to ``fillna(method="ffill")``
+and :meth:`~DataFrame.bfill` is equivalent to ``fillna(method="bfill")``

 .. _missing_data.PandasObject:

@@ -289,21 +289,21 @@ use case of this is to fill a DataFrame with the mean of that column.

 .. ipython:: python

-   dff = pd.DataFrame(np.random.randn(10, 3), columns=list('ABC'))
+   dff = pd.DataFrame(np.random.randn(10, 3), columns=list("ABC"))
    dff.iloc[3:5, 0] = np.nan
    dff.iloc[4:6, 1] = np.nan
    dff.iloc[5:8, 2] = np.nan
    dff
    dff.fillna(dff.mean())
-   dff.fillna(dff.mean()['B':'C'])
+   dff.fillna(dff.mean()["B":"C"])

-Same result as above, but is aligning the 'fill' value which is
+Same result as above, but is aligning the "fill" value which is
 a Series in this case.

 .. ipython:: python

-   dff.where(pd.notna(dff), dff.mean(), axis='columns')
+   dff.where(pd.notna(dff), dff.mean(), axis="columns")

 .. _missing_data.dropna:
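A compact sketch of the fill-with-column-means pattern (column names assumed):

.. code-block:: python

   import numpy as np
   import pandas as pd

   dff = pd.DataFrame({"A": [1.0, np.nan, 3.0], "B": [np.nan, 2.0, 4.0]})
   # fillna aligns the Series of column means on the column labels
   print(dff.fillna(dff.mean()))
   # the equivalent where() form keeps valid values and fills the rest
   print(dff.where(pd.notna(dff), dff.mean(), axis="columns"))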
@@ -317,15 +317,15 @@ data. To do this, use :meth:`~DataFrame.dropna`:

 .. ipython:: python
    :suppress:

-   df['two'] = df['two'].fillna(0)
-   df['three'] = df['three'].fillna(0)
+   df["two"] = df["two"].fillna(0)
+   df["three"] = df["three"].fillna(0)

 .. ipython:: python

    df
    df.dropna(axis=0)
    df.dropna(axis=1)
-   df['one'].dropna()
+   df["one"].dropna()

 An equivalent :meth:`~Series.dropna` is available for Series.
 DataFrame.dropna has considerably more options than Series.dropna, which can be

@@ -343,7 +343,7 @@ that, by default, performs linear interpolation at missing data points.
    :suppress:

    np.random.seed(123456)
-   idx = pd.date_range('1/1/2000', periods=100, freq='BM')
+   idx = pd.date_range("1/1/2000", periods=100, freq="BM")
    ts = pd.Series(np.random.randn(100), index=idx)
    ts[1:5] = np.nan
    ts[20:30] = np.nan

@@ -376,9 +376,9 @@ Index aware interpolation is available via the ``method`` keyword:

    ts2
    ts2.interpolate()
-   ts2.interpolate(method='time')
+   ts2.interpolate(method="time")

-For a floating-point index, use ``method='values'``:
+For a floating-point index, use ``method="values"``:

 .. ipython:: python
    :suppress:

@@ -390,27 +390,27 @@ For a floating-point index, use ``method="values"``:

    ser
    ser.interpolate()
-   ser.interpolate(method='values')
+   ser.interpolate(method="values")

 You can also interpolate with a DataFrame:

 .. ipython:: python

-   df = pd.DataFrame({'A': [1, 2.1, np.nan, 4.7, 5.6, 6.8],
-                      'B': [.25, np.nan, np.nan, 4, 12.2, 14.4]})
+   df = pd.DataFrame({"A": [1, 2.1, np.nan, 4.7, 5.6, 6.8],
+                      "B": [.25, np.nan, np.nan, 4, 12.2, 14.4]})
    df
    df.interpolate()

 The ``method`` argument gives access to fancier interpolation methods.
 If you have scipy_ installed, you can pass the name of a 1-d interpolation routine to ``method``.
 You'll want to consult the full scipy interpolation documentation_ and reference guide_ for details.
 The appropriate interpolation method will depend on the type of data you are working with.

 * If you are dealing with a time series that is growing at an increasing rate,
-  ``method='quadratic'`` may be appropriate.
+  ``method="quadratic"`` may be appropriate.
 * If you have values approximating a cumulative distribution function,
-  then ``method='pchip'`` should work well.
-* To fill missing values with goal of smooth plotting, consider ``method='akima'``.
+  then ``method="pchip"`` should work well.
+* To fill missing values with goal of smooth plotting, consider ``method="akima"``.

 .. warning::

@@ -418,20 +418,20 @@ The appropriate interpolation method will depend on the type of data you are wor

 .. ipython:: python

-   df.interpolate(method='barycentric')
+   df.interpolate(method="barycentric")

-   df.interpolate(method='pchip')
+   df.interpolate(method="pchip")

-   df.interpolate(method='akima')
+   df.interpolate(method="akima")
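A short sketch of the SciPy-backed methods (requires SciPy; toy data assumed):

.. code-block:: python

   import numpy as np
   import pandas as pd

   s = pd.Series([0.0, 1.0, np.nan, 9.0, 16.0])
   print(s.interpolate())                    # linear by default -> 5.0
   print(s.interpolate(method="quadratic"))  # SciPy-backed -> about 4.0 here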
 When interpolating via a polynomial or spline approximation, you must also specify
 the degree or order of the approximation:

 .. ipython:: python

-   df.interpolate(method='spline', order=2)
+   df.interpolate(method="spline", order=2)

-   df.interpolate(method='polynomial', order=2)
+   df.interpolate(method="polynomial", order=2)

 Compare several methods:

@@ -442,16 +442,16 @@ Compare several methods:

    ser = pd.Series(np.arange(1, 10.1, .25) ** 2 + np.random.randn(37))
    missing = np.array([4, 13, 14, 15, 16, 17, 18, 20, 29])
    ser[missing] = np.nan
-   methods = ['linear', 'quadratic', 'cubic']
+   methods = ["linear", "quadratic", "cubic"]

    df = pd.DataFrame({m: ser.interpolate(method=m) for m in methods})
    @savefig compare_interpolations.png
    df.plot()

 Another use case is interpolation at *new* values.
 Suppose you have 100 observations from some distribution. And let's suppose
 that you're particularly interested in what's happening around the middle.
 You can mix pandas' ``reindex`` and ``interpolate`` methods to interpolate
 at the new values.

 .. ipython:: python

    # interpolate at new_index
    new_index = ser.index | pd.Index([49.25, 49.5, 49.75, 50.25, 50.5, 50.75])
-   interp_s = ser.reindex(new_index).interpolate(method='pchip')
+   interp_s = ser.reindex(new_index).interpolate(method="pchip")
    interp_s[49:51]

 .. _scipy: https://www.scipy.org

@@ -494,13 +494,13 @@ By default, ``NaN`` values are filled in a ``forward`` direction. Use

 .. ipython:: python

    # fill one consecutive value backwards
-   ser.interpolate(limit=1, limit_direction='backward')
+   ser.interpolate(limit=1, limit_direction="backward")

    # fill one consecutive value in both directions
-   ser.interpolate(limit=1, limit_direction='both')
+   ser.interpolate(limit=1, limit_direction="both")

    # fill all consecutive values in both directions
-   ser.interpolate(limit_direction='both')
+   ser.interpolate(limit_direction="both")

 By default, ``NaN`` values are filled whether they are inside (surrounded by)
 existing valid values, or outside existing valid values. The ``limit_area``
 parameter restricts filling to either inside or outside values.

@@ -509,13 +509,13 @@ parameter restricts filling to either inside or outside values.

 .. ipython:: python

    # fill one consecutive inside value in both directions
-   ser.interpolate(limit_direction='both', limit_area='inside', limit=1)
+   ser.interpolate(limit_direction="both", limit_area="inside", limit=1)

    # fill all consecutive outside values backward
-   ser.interpolate(limit_direction='backward', limit_area='outside')
+   ser.interpolate(limit_direction="backward", limit_area="outside")

    # fill all consecutive outside values in both directions
-   ser.interpolate(limit_direction='both', limit_area='outside')
+   ser.interpolate(limit_direction="both", limit_area="outside")

 .. _missing_data.replace:

@@ -551,16 +551,16 @@ For a DataFrame, you can specify individual values by column:

 .. ipython:: python

-   df = pd.DataFrame({'a': [0, 1, 2, 3, 4], 'b': [5, 6, 7, 8, 9]})
+   df = pd.DataFrame({"a": [0, 1, 2, 3, 4], "b": [5, 6, 7, 8, 9]})

-   df.replace({'a': 0, 'b': 5}, 100)
+   df.replace({"a": 0, "b": 5}, 100)

 Instead of replacing with specified values, you can treat all given values as
 missing and interpolate over them:

 .. ipython:: python

-   ser.replace([1, 2, 3], method='pad')
+   ser.replace([1, 2, 3], method="pad")

 .. _missing_data.replace_expression:
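A minimal sketch of scalar and list replacement (toy Series assumed):

.. code-block:: python

   import pandas as pd

   ser = pd.Series([0.0, 1.0, 2.0, 3.0, 4.0])
   print(ser.replace(0, 5))                              # scalar -> scalar
   print(ser.replace([0, 1, 2, 3, 4], [4, 3, 2, 1, 0]))  # list -> list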
@@ -569,79 +569,79 @@ String/regular expression replacement

 .. note::

-   Python strings prefixed with the ``r`` character such as ``r'hello world'``
+   Python strings prefixed with the ``r`` character such as ``r"hello world"``
    are so-called "raw" strings. They have different semantics regarding
    backslashes than strings without this prefix. Backslashes in raw strings
-   will be interpreted as an escaped backslash, e.g., ``r'\' == '\\'``. You
+   will be interpreted as an escaped backslash, e.g., ``r"\" == "\\"``. You
    should `read about them `__
    if this is unclear.

-Replace the '.' with ``NaN`` (str -> str):
+Replace the "." with ``NaN`` (str -> str):

 .. ipython:: python

-   d = {'a': list(range(4)), 'b': list('ab..'), 'c': ['a', 'b', np.nan, 'd']}
+   d = {"a": list(range(4)), "b": list("ab.."), "c": ["a", "b", np.nan, "d"]}
    df = pd.DataFrame(d)
-   df.replace('.', np.nan)
+   df.replace(".", np.nan)

 Now do it with a regular expression that removes surrounding whitespace
 (regex -> regex):

 .. ipython:: python

-   df.replace(r'\s*\.\s*', np.nan, regex=True)
+   df.replace(r"\s*\.\s*", np.nan, regex=True)

 Replace a few different values (list -> list):

 .. ipython:: python

-   df.replace(['a', '.'], ['b', np.nan])
+   df.replace(["a", "."], ["b", np.nan])

 list of regex -> list of regex:

 .. ipython:: python

-   df.replace([r'\.', r'(a)'], ['dot', r'\1stuff'], regex=True)
+   df.replace([r"\.", r"(a)"], ["dot", r"\1stuff"], regex=True)

-Only search in column ``'b'`` (dict -> dict):
+Only search in column ``"b"`` (dict -> dict):

 .. ipython:: python

-   df.replace({'b': '.'}, {'b': np.nan})
+   df.replace({"b": "."}, {"b": np.nan})

 Same as the previous example, but use a regular expression for
 searching instead (dict of regex -> dict):

 .. ipython:: python

-   df.replace({'b': r'\s*\.\s*'}, {'b': np.nan}, regex=True)
+   df.replace({"b": r"\s*\.\s*"}, {"b": np.nan}, regex=True)

 You can pass nested dictionaries of regular expressions that use ``regex=True``:

 .. ipython:: python

-   df.replace({'b': {'b': r''}}, regex=True)
+   df.replace({"b": {"b": r""}}, regex=True)

 Alternatively, you can pass the nested dictionary like so:

 .. ipython:: python

-   df.replace(regex={'b': {r'\s*\.\s*': np.nan}})
+   df.replace(regex={"b": {r"\s*\.\s*": np.nan}})

 You can also use the group of a regular expression match when replacing (dict
 of regex -> dict of regex), this works for lists as well.

 .. ipython:: python

-   df.replace({'b': r'\s*(\.)\s*'}, {'b': r'\1ty'}, regex=True)
+   df.replace({"b": r"\s*(\.)\s*"}, {"b": r"\1ty"}, regex=True)

 You can pass a list of regular expressions, of which those that match
 will be replaced with a scalar (list of regex -> regex).

 .. ipython:: python

-   df.replace([r'\s*\.\s*', r'a|b'], np.nan, regex=True)
+   df.replace([r"\s*\.\s*", r"a|b"], np.nan, regex=True)

 All of the regular expression examples can also be passed with the
 ``to_replace`` argument as the ``regex`` argument. In this case the ``value``
 dictionary. The previous example, in this case, would then be:

 .. ipython:: python

-   df.replace(regex=[r'\s*\.\s*', r'a|b'], value=np.nan)
+   df.replace(regex=[r"\s*\.\s*", r"a|b"], value=np.nan)

 This can be convenient if you do not want to pass ``regex=True`` every time
 you want to use a regular expression.
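A small sketch of the whitespace-and-dot pattern used throughout this section (toy frame assumed):

.. code-block:: python

   import numpy as np
   import pandas as pd

   df = pd.DataFrame({"a": list(range(4)), "b": list("ab..")})
   # treat a dot with optional surrounding whitespace as missing
   print(df.replace(r"\s*\.\s*", np.nan, regex=True))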
@@ -676,7 +676,7 @@ Replacing more than one value is possible by passing a list.

 .. ipython:: python

    df00 = df.iloc[0, 0]
-   df.replace([1.5, df00], [np.nan, 'a'])
+   df.replace([1.5, df00], [np.nan, "a"])
    df[1].dtype

 You can also operate on the DataFrame in place:

@@ -690,7 +690,7 @@ Missing data casting rules and indexing

 While pandas supports storing arrays of integer and boolean type, these types
 are not capable of storing missing data. Until we can switch to using a native
 NA type in NumPy, we've established some "casting rules". When a reindexing
 operation introduces missing data, the Series will be cast according to the
 rules introduced in the table below.

@@ -891,7 +891,7 @@ statements, see :ref:`gotchas.truth`.

 NumPy ufuncs
 ------------

 :attr:`pandas.NA` implements NumPy's ``__array_ufunc__`` protocol. Most ufuncs
 work with ``NA``, and generally return ``NA``:

 .. ipython:: python

@@ -932,7 +932,7 @@ the first 10 columns.

 .. ipython:: python

-   bb = pd.read_csv('data/baseball.csv', index_col='id')
+   bb = pd.read_csv("data/baseball.csv", index_col="id")
    bb[bb.columns[:10]].dtypes
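A tiny sketch of ``pd.NA`` propagating through a NumPy ufunc (nullable integer dtype assumed):

.. code-block:: python

   import numpy as np
   import pandas as pd

   arr = pd.array([1, pd.NA, 3], dtype="Int64")
   # most ufuncs propagate NA rather than raising
   print(np.add(arr, 1))  # [2, <NA>, 4]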
diff --git a/doc/source/user_guide/timedeltas.rst b/doc/source/user_guide/timedeltas.rst
--- a/doc/source/user_guide/timedeltas.rst
+++ b/doc/source/user_guide/timedeltas.rst
@@ -355,7 +355,7 @@ or ``np.timedelta64`` objects. Passing ``np.nan/pd.NaT/nat`` will represent miss
    ]
    )

-The string 'infer' can be passed in order to set the frequency of the index as the
+The string "infer" can be passed in order to set the frequency of the index as the
 inferred frequency upon creation:

 .. ipython:: python

diff --git a/doc/source/user_guide/timeseries.rst b/doc/source/user_guide/timeseries.rst
index 61902b4a41b7c..c4be12680e3dc 100644
--- a/doc/source/user_guide/timeseries.rst
+++ b/doc/source/user_guide/timeseries.rst
@@ -19,7 +19,7 @@ Parsing time series information from various sources and formats

 .. ipython:: python

    import datetime

-   dti = pd.to_datetime(['1/1/2018', np.datetime64('2018-01-01'),
+   dti = pd.to_datetime(["1/1/2018", np.datetime64("2018-01-01"),
                          datetime.datetime(2018, 1, 1)])
    dti

@@ -27,34 +27,34 @@ Generate sequences of fixed-frequency dates and time spans

 .. ipython:: python

-   dti = pd.date_range('2018-01-01', periods=3, freq='H')
+   dti = pd.date_range("2018-01-01", periods=3, freq="H")
    dti

 Manipulating and converting date times with timezone information

 .. ipython:: python

-   dti = dti.tz_localize('UTC')
+   dti = dti.tz_localize("UTC")
    dti
-   dti.tz_convert('US/Pacific')
+   dti.tz_convert("US/Pacific")
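The same localize/convert round trip also works column-wise through the ``.dt`` accessor; a brief sketch (zones chosen arbitrarily):

.. code-block:: python

   import pandas as pd

   s = pd.Series(pd.date_range("2018-01-01", periods=3, freq="H"))
   # .dt exposes the same timezone handling on a Series of datetimes
   print(s.dt.tz_localize("UTC").dt.tz_convert("US/Pacific"))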
 Resampling or converting a time series to a particular frequency

 .. ipython:: python

-   idx = pd.date_range('2018-01-01', periods=5, freq='H')
+   idx = pd.date_range("2018-01-01", periods=5, freq="H")
    ts = pd.Series(range(len(idx)), index=idx)
    ts
-   ts.resample('2H').mean()
+   ts.resample("2H").mean()

 Performing date and time arithmetic with absolute or relative time increments

 .. ipython:: python

-   friday = pd.Timestamp('2018-01-05')
+   friday = pd.Timestamp("2018-01-05")
    friday.day_name()
    # Add 1 day
-   saturday = friday + pd.Timedelta('1 day')
+   saturday = friday + pd.Timedelta("1 day")
    saturday.day_name()
    # Add 1 business day (Friday --> Monday)
    monday = friday + pd.offsets.BDay()

@@ -85,18 +85,18 @@ Time spans ``Period`` ``PeriodIndex`` ``period[freq]`
 Date offsets          ``DateOffset``    ``None``            ``None``                                     ``DateOffset``
 ===================== ================= =================== ============================================ ========================================

 For time series data, it's conventional to represent the time component in the index of a :class:`Series` or :class:`DataFrame`
 so manipulations can be performed with respect to the time element.

 .. ipython:: python

-   pd.Series(range(3), index=pd.date_range('2000', freq='D', periods=3))
+   pd.Series(range(3), index=pd.date_range("2000", freq="D", periods=3))

 However, :class:`Series` and :class:`DataFrame` can directly also support the time component as data itself.

 .. ipython:: python

-   pd.Series(pd.date_range('2000', freq='D', periods=3))
+   pd.Series(pd.date_range("2000", freq="D", periods=3))

 :class:`Series` and :class:`DataFrame` have extended data type support and
 functionality for ``datetime``, ``timedelta`` and ``Period`` data when
 passed into those constructors. ``DateOffset``
 data however will be stored as ``object`` data.

@@ -104,9 +104,9 @@ data however will be stored as ``object`` data.

 .. ipython:: python

-   pd.Series(pd.period_range('1/1/2011', freq='M', periods=3))
+   pd.Series(pd.period_range("1/1/2011", freq="M", periods=3))
    pd.Series([pd.DateOffset(1), pd.DateOffset(2)])
-   pd.Series(pd.date_range('1/1/2011', freq='M', periods=3))
+   pd.Series(pd.date_range("1/1/2011", freq="M", periods=3))

 Lastly, pandas represents null date times, time deltas, and time spans as ``NaT`` which
 is useful for representing missing or null date like values and behaves similar

@@ -132,7 +132,7 @@ time.

 .. ipython:: python

    pd.Timestamp(datetime.datetime(2012, 5, 1))
-   pd.Timestamp('2012-05-01')
+   pd.Timestamp("2012-05-01")
    pd.Timestamp(2012, 5, 1)

 However, in many cases it is more natural to associate things like change
 variables with a time span instead.

 For example:

 .. ipython:: python

-   pd.Period('2011-01')
+   pd.Period("2011-01")

-   pd.Period('2012-05', freq='D')
+   pd.Period("2012-05", freq="D")
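A short sketch contrasting a point in time with a span (values illustrative):

.. code-block:: python

   import pandas as pd

   ts = pd.Timestamp("2012-05-01 13:30")
   p = pd.Period("2012-05", freq="M")
   # a Period knows the span it covers, so containment checks are natural
   print(p.start_time <= ts <= p.end_time)  # True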
 :class:`Timestamp` and :class:`Period` can serve as an index. Lists of
 ``Timestamp`` and ``Period`` are automatically coerced to :class:`DatetimeIndex`
 and :class:`PeriodIndex` respectively.

 .. ipython:: python

-   dates = [pd.Timestamp('2012-05-01'),
-            pd.Timestamp('2012-05-02'),
-            pd.Timestamp('2012-05-03')]
+   dates = [pd.Timestamp("2012-05-01"),
+            pd.Timestamp("2012-05-02"),
+            pd.Timestamp("2012-05-03")]
    ts = pd.Series(np.random.randn(3), dates)

    type(ts.index)
    ts

-   periods = [pd.Period('2012-01'), pd.Period('2012-02'), pd.Period('2012-03')]
+   periods = [pd.Period("2012-01"), pd.Period("2012-02"), pd.Period("2012-03")]

    ts = pd.Series(np.random.randn(3), periods)

@@ -193,47 +193,47 @@ is converted to a ``DatetimeIndex``:

 .. ipython:: python

-   pd.to_datetime(pd.Series(['Jul 31, 2009', '2010-01-10', None]))
+   pd.to_datetime(pd.Series(["Jul 31, 2009", "2010-01-10", None]))

-   pd.to_datetime(['2005/11/23', '2010.12.31'])
+   pd.to_datetime(["2005/11/23", "2010.12.31"])

 If you use dates which start with the day first (i.e. European style),
 you can pass the ``dayfirst`` flag:

 .. ipython:: python

-   pd.to_datetime(['04-01-2012 10:00'], dayfirst=True)
+   pd.to_datetime(["04-01-2012 10:00"], dayfirst=True)

-   pd.to_datetime(['14-01-2012', '01-14-2012'], dayfirst=True)
+   pd.to_datetime(["14-01-2012", "01-14-2012"], dayfirst=True)

 .. warning::

    You see in the above example that ``dayfirst`` isn't strict, so if a date
    can't be parsed with the day being first it will be parsed as if
    ``dayfirst`` were False.

 If you pass a single string to ``to_datetime``, it returns a single ``Timestamp``.
 ``Timestamp`` can also accept string input, but it doesn't accept string parsing
 options like ``dayfirst`` or ``format``, so use ``to_datetime`` if these are required.

 .. ipython:: python

-   pd.to_datetime('2010/11/12')
+   pd.to_datetime("2010/11/12")

-   pd.Timestamp('2010/11/12')
+   pd.Timestamp("2010/11/12")

 You can also use the ``DatetimeIndex`` constructor directly:

 .. ipython:: python

-   pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'])
+   pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"])

-The string 'infer' can be passed in order to set the frequency of the index as the
+The string "infer" can be passed in order to set the frequency of the index as the
 inferred frequency upon creation:

 .. ipython:: python

-   pd.DatetimeIndex(['2018-01-01', '2018-01-03', '2018-01-05'], freq='infer')
+   pd.DatetimeIndex(["2018-01-01", "2018-01-03", "2018-01-05"], freq="infer")

 .. _timeseries.converting.format:

@@ -245,9 +245,9 @@ This could also potentially speed up the conversion considerably.

 .. ipython:: python

-   pd.to_datetime('2010/11/12', format='%Y/%m/%d')
+   pd.to_datetime("2010/11/12", format="%Y/%m/%d")

-   pd.to_datetime('12-11-2010 00:00', format='%d-%m-%Y %H:%M')
+   pd.to_datetime("12-11-2010 00:00", format="%d-%m-%Y %H:%M")

 For more information on the choices available when specifying the ``format``
 option, see the Python `datetime documentation`_.

@@ -261,10 +261,10 @@ You can also pass a ``DataFrame`` of integer or string columns to assemble into

 .. ipython:: python

-   df = pd.DataFrame({'year': [2015, 2016],
-                      'month': [2, 3],
-                      'day': [4, 5],
-                      'hour': [2, 3]})
+   df = pd.DataFrame({"year": [2015, 2016],
+                      "month": [2, 3],
+                      "day": [4, 5],
+                      "hour": [2, 3]})
    pd.to_datetime(df)

 You can pass only the columns that you need to assemble.

 .. ipython:: python

-   pd.to_datetime(df[['year', 'month', 'day']])
+   pd.to_datetime(df[["year", "month", "day"]])
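A compact sketch of assembling datetimes from columns (the column names follow the documented designations):

.. code-block:: python

   import pandas as pd

   df = pd.DataFrame({"year": [2015, 2016], "month": [2, 3], "day": [4, 5]})
   # "year", "month" and "day" are matched to datetime components
   print(pd.to_datetime(df))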
 ``pd.to_datetime`` looks for standard designations of the datetime component in the
 column names, including:

@@ -282,24 +282,24 @@ Invalid data
 ~~~~~~~~~~~~

-The default behavior, ``errors='raise'``, is to raise when unparsable:
+The default behavior, ``errors="raise"``, is to raise when unparsable:

 .. code-block:: ipython

-   In [2]: pd.to_datetime(['2009/07/31', 'asd'], errors='raise')
+   In [2]: pd.to_datetime(["2009/07/31", "asd"], errors="raise")
    ValueError: Unknown string format

-Pass ``errors='ignore'`` to return the original input when unparsable:
+Pass ``errors="ignore"`` to return the original input when unparsable:

 .. ipython:: python

-   pd.to_datetime(['2009/07/31', 'asd'], errors='ignore')
+   pd.to_datetime(["2009/07/31", "asd"], errors="ignore")

-Pass ``errors='coerce'`` to convert unparsable data to ``NaT`` (not a time):
+Pass ``errors="coerce"`` to convert unparsable data to ``NaT`` (not a time):

 .. ipython:: python

-   pd.to_datetime(['2009/07/31', 'asd'], errors='coerce')
+   pd.to_datetime(["2009/07/31", "asd"], errors="coerce")

 .. _timeseries.converting.epoch:

@@ -316,10 +316,10 @@ which can be specified. These are computed from the starting point specified by

 .. ipython:: python

    pd.to_datetime([1349720105, 1349806505, 1349892905,
-                   1349979305, 1350065705], unit='s')
+                   1349979305, 1350065705], unit="s")

    pd.to_datetime([1349720105100, 1349720105200, 1349720105300,
-                   1349720105400, 1349720105500], unit='ms')
+                   1349720105400, 1349720105500], unit="ms")

 .. note::

@@ -336,8 +336,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:

 .. ipython:: python

-   pd.Timestamp(1262347200000000000).tz_localize('US/Pacific')
-   pd.DatetimeIndex([1262347200000000000]).tz_localize('US/Pacific')
+   pd.Timestamp(1262347200000000000).tz_localize("US/Pacific")
+   pd.DatetimeIndex([1262347200000000000]).tz_localize("US/Pacific")

 .. note::

@@ -353,8 +353,8 @@ as timezone-naive timestamps and then localize to the appropriate timezone:

 .. ipython:: python

-   pd.to_datetime([1490195805.433, 1490195805.433502912], unit='s')
-   pd.to_datetime(1490195805433502912, unit='ns')
+   pd.to_datetime([1490195805.433, 1490195805.433502912], unit="s")
+   pd.to_datetime(1490195805433502912, unit="ns")

 .. seealso::

@@ -365,11 +365,11 @@ as timezone-naive timestamps and then localize to the appropriate timezone:

 From timestamps to epoch
 ~~~~~~~~~~~~~~~~~~~~~~~~

-To invert the operation from above, namely, to convert from a ``Timestamp`` to a 'unix' epoch:
+To invert the operation from above, namely, to convert from a ``Timestamp`` to a "unix" epoch:

 .. ipython:: python

-   stamps = pd.date_range('2012-10-08 18:15:05', periods=4, freq='D')
+   stamps = pd.date_range("2012-10-08 18:15:05", periods=4, freq="D")
    stamps

 We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by the

@@ -377,7 +377,7 @@ We subtract the epoch (midnight at January 1, 1970 UTC) and then floor divide by

 .. ipython:: python

-   (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta('1s')
+   (stamps - pd.Timestamp("1970-01-01")) // pd.Timedelta("1s")

 .. _timeseries.origin:

@@ -389,14 +389,14 @@ of a ``DatetimeIndex``. For example, to use 1960-01-01 as the starting date:

 .. ipython:: python

-   pd.to_datetime([1, 2, 3], unit='D', origin=pd.Timestamp('1960-01-01'))
+   pd.to_datetime([1, 2, 3], unit="D", origin=pd.Timestamp("1960-01-01"))

-The default is set at ``origin='unix'``, which defaults to ``1970-01-01 00:00:00``.
-Commonly called 'unix epoch' or POSIX time.
+The default is set at ``origin="unix"``, which defaults to ``1970-01-01 00:00:00``.
+Commonly called "unix epoch" or POSIX time.
ipython:: python - pd.to_datetime([1, 2, 3], unit='D') + pd.to_datetime([1, 2, 3], unit="D") .. _timeseries.daterange: @@ -442,9 +442,9 @@ variety of :ref:`frequency aliases `: .. ipython:: python - pd.date_range(start, periods=1000, freq='M') + pd.date_range(start, periods=1000, freq="M") - pd.bdate_range(start, periods=250, freq='BQS') + pd.bdate_range(start, periods=250, freq="BQS") ``date_range`` and ``bdate_range`` make it easy to generate a range of dates using various combinations of parameters like ``start``, ``end``, ``periods``, @@ -453,9 +453,9 @@ of those specified will not be generated: .. ipython:: python - pd.date_range(start, end, freq='BM') + pd.date_range(start, end, freq="BM") - pd.date_range(start, end, freq='W') + pd.date_range(start, end, freq="W") pd.bdate_range(end=end, periods=20) @@ -467,9 +467,9 @@ resulting ``DatetimeIndex``: .. ipython:: python - pd.date_range('2018-01-01', '2018-01-05', periods=5) + pd.date_range("2018-01-01", "2018-01-05", periods=5) - pd.date_range('2018-01-01', '2018-01-05', periods=10) + pd.date_range("2018-01-01", "2018-01-05", periods=10) .. _timeseries.custom-freq-ranges: @@ -482,13 +482,13 @@ used if a custom frequency string is passed. .. ipython:: python - weekmask = 'Mon Wed Fri' + weekmask = "Mon Wed Fri" holidays = [datetime.datetime(2011, 1, 5), datetime.datetime(2011, 3, 14)] - pd.bdate_range(start, end, freq='C', weekmask=weekmask, holidays=holidays) + pd.bdate_range(start, end, freq="C", weekmask=weekmask, holidays=holidays) - pd.bdate_range(start, end, freq='CBMS', weekmask=weekmask) + pd.bdate_range(start, end, freq="CBMS", weekmask=weekmask) .. seealso:: @@ -545,7 +545,7 @@ intelligent functionality like selection, slicing, etc. .. ipython:: python - rng = pd.date_range(start, end, freq='BM') + rng = pd.date_range(start, end, freq="BM") ts = pd.Series(np.random.randn(len(rng)), index=rng) ts.index ts[:5].index @@ -560,20 +560,20 @@ Dates and strings that parse to timestamps can be passed as indexing parameters: .. ipython:: python - ts['1/31/2011'] + ts["1/31/2011"] ts[datetime.datetime(2011, 12, 25):] - ts['10/31/2011':'12/31/2011'] + ts["10/31/2011":"12/31/2011"] To provide convenience for accessing longer time series, you can also pass in the year or year and month as strings: .. ipython:: python - ts['2011'] + ts["2011"] - ts['2011-6'] + ts["2011-6"] This type of slicing will work on a ``DataFrame`` with a ``DatetimeIndex`` as well. Since the partial string selection is a form of label slicing, the endpoints **will be** included. This @@ -586,10 +586,10 @@ would include matching times on an included date: .. ipython:: python :okwarning: - dft = pd.DataFrame(np.random.randn(100000, 1), columns=['A'], - index=pd.date_range('20130101', periods=100000, freq='T')) + dft = pd.DataFrame(np.random.randn(100000, 1), columns=["A"], + index=pd.date_range("20130101", periods=100000, freq="T")) dft - dft['2013'] + dft["2013"] This starts on the very first time in the month, and includes the last date and time for the month: @@ -597,43 +597,43 @@ time for the month: .. ipython:: python :okwarning: - dft['2013-1':'2013-2'] + dft["2013-1":"2013-2"] This specifies a stop time **that includes all of the times on the last day**: .. ipython:: python :okwarning: - dft['2013-1':'2013-2-28'] + dft["2013-1":"2013-2-28"] This specifies an **exact** stop time (and is not the same as the above): .. 
ipython:: python :okwarning: - dft['2013-1':'2013-2-28 00:00:00'] + dft["2013-1":"2013-2-28 00:00:00"] We are stopping on the included end-point as it is part of the index: .. ipython:: python :okwarning: - dft['2013-1-15':'2013-1-15 12:30:00'] + dft["2013-1-15":"2013-1-15 12:30:00"] ``DatetimeIndex`` partial string indexing also works on a ``DataFrame`` with a ``MultiIndex``: .. ipython:: python dft2 = pd.DataFrame(np.random.randn(20, 1), - columns=['A'], + columns=["A"], index=pd.MultiIndex.from_product( - [pd.date_range('20130101', periods=10, freq='12H'), - ['a', 'b']])) + [pd.date_range("20130101", periods=10, freq="12H"), + ["a", "b"]])) dft2 - dft2.loc['2013-01-05'] + dft2.loc["2013-01-05"] idx = pd.IndexSlice dft2 = dft2.swaplevel(0, 1).sort_index() - dft2.loc[idx[:, '2013-01-05'], :] + dft2.loc[idx[:, "2013-01-05"], :] .. versionadded:: 0.25.0 Slicing with string indexing also honors UTC offset. .. ipython:: python :okwarning: - df = pd.DataFrame([0], index=pd.DatetimeIndex(['2019-01-01'], tz='US/Pacific')) + df = pd.DataFrame([0], index=pd.DatetimeIndex(["2019-01-01"], tz="US/Pacific")) df - df['2019-01-01 12:00:00+04:00':'2019-01-01 13:00:00+04:00'] + df["2019-01-01 12:00:00+04:00":"2019-01-01 13:00:00+04:00"] .. _timeseries.slice_vs_exact_match: @@ -658,23 +658,23 @@ Consider a ``Series`` object with a minute resolution index: .. ipython:: python series_minute = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12-31 23:59:00', - '2012-01-01 00:00:00', - '2012-01-01 00:02:00'])) + pd.DatetimeIndex(["2011-12-31 23:59:00", + "2012-01-01 00:00:00", + "2012-01-01 00:02:00"])) series_minute.index.resolution A timestamp string less accurate than a minute gives a ``Series`` object. .. ipython:: python - series_minute['2011-12-31 23'] + series_minute["2011-12-31 23"] A timestamp string with minute resolution (or more accurate) gives a scalar instead, i.e. it is not cast to a slice. .. ipython:: python - series_minute['2011-12-31 23:59'] - series_minute['2011-12-31 23:59:00'] + series_minute["2011-12-31 23:59"] + series_minute["2011-12-31 23:59:00"] If index resolution is second, then the minute-accurate timestamp gives a ``Series``. .. ipython:: python series_second = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12-31 23:59:59', - '2012-01-01 00:00:00', - '2012-01-01 00:00:01'])) + pd.DatetimeIndex(["2011-12-31 23:59:59", + "2012-01-01 00:00:00", + "2012-01-01 00:00:01"])) series_second.index.resolution - series_second['2011-12-31 23:59'] + series_second["2011-12-31 23:59"] If the timestamp string is treated as a slice, it can be used to index ``DataFrame`` with ``[]`` as well. .. ipython:: python :okwarning: - dft_minute = pd.DataFrame({'a': [1, 2, 3], 'b': [4, 5, 6]}, + dft_minute = pd.DataFrame({"a": [1, 2, 3], "b": [4, 5, 6]}, index=series_minute.index) - dft_minute['2011-12-31 23'] + dft_minute["2011-12-31 23"] .. warning:: - However, if the string is treated as an exact match, the selection in ``DataFrame``'s ``[]`` will be column-wise and not row-wise, see :ref:`Indexing Basics `. For example ``dft_minute['2011-12-31 23:59']`` will raise ``KeyError`` as ``'2012-12-31 23:59'`` has the same resolution as the index and there is no column with such name: + However, if the string is treated as an exact match, the selection in ``DataFrame``'s ``[]`` will be column-wise and not row-wise, see :ref:`Indexing Basics `.
For example ``dft_minute["2011-12-31 23:59"]`` will raise ``KeyError`` as ``"2011-12-31 23:59"`` has the same resolution as the index and there is no column with such a name: To *always* have unambiguous selection, whether the row is treated as a slice or a single selection, use ``.loc``. .. ipython:: python - dft_minute.loc['2011-12-31 23:59'] + dft_minute.loc["2011-12-31 23:59"] Note also that ``DatetimeIndex`` resolution cannot be less precise than day. .. ipython:: python series_monthly = pd.Series([1, 2, 3], - pd.DatetimeIndex(['2011-12', '2012-01', '2012-02'])) + pd.DatetimeIndex(["2011-12", "2012-01", "2012-02"])) series_monthly.index.resolution - series_monthly['2011-12'] # returns Series + series_monthly["2011-12"] # returns Series Exact indexing ~~~~~~~~~~~~~~ @@ -747,11 +747,11 @@ partially matching dates: .. ipython:: python - rng2 = pd.date_range('2011-01-01', '2012-01-01', freq='W') + rng2 = pd.date_range("2011-01-01", "2012-01-01", freq="W") ts2 = pd.Series(np.random.randn(len(rng2)), index=rng2) - ts2.truncate(before='2011-11', after='2011-12') - ts2['2011-11':'2011-12'] + ts2.truncate(before="2011-11", after="2011-12") + ts2["2011-11":"2011-12"] Even complicated fancy indexing that breaks the ``DatetimeIndex`` frequency regularity will result in a ``DatetimeIndex``, although frequency is lost: @@ -807,7 +807,7 @@ You may obtain the year, week and day components of the ISO year from the ISO 86 .. ipython:: python - idx = pd.date_range(start='2019-12-29', freq='D', periods=4) + idx = pd.date_range(start="2019-12-29", freq="D", periods=4) idx.isocalendar() idx.to_series().dt.isocalendar() @@ -816,7 +816,7 @@ DateOffset objects ------------------ -In the preceding examples, frequency strings (e.g. ``'D'``) were used to specify +In the preceding examples, frequency strings (e.g. ``"D"``) were used to specify a frequency that defined: * how the date times in :class:`DatetimeIndex` were spaced when using :meth:`date_range` @@ -837,12 +837,12 @@ arithmetic operator (``+``) or the ``apply`` method can be used to perform the s .. ipython:: python # This particular day contains a daylight saving time transition - ts = pd.Timestamp('2016-10-30 00:00:00', tz='Europe/Helsinki') + ts = pd.Timestamp("2016-10-30 00:00:00", tz="Europe/Helsinki") # Respects absolute time ts + pd.Timedelta(days=1) # Respects calendar time ts + pd.DateOffset(days=1) - friday = pd.Timestamp('2018-01-05') + friday = pd.Timestamp("2018-01-05") friday.day_name() # Add 2 business days (Friday --> Tuesday) two_business_days = 2 * pd.offsets.BDay() @@ -858,39 +858,39 @@ into ``freq`` keyword arguments.
The available date offsets and associated frequ :widths: 15, 15, 65 :class:`~pandas.tseries.offsets.DateOffset`, None, "Generic offset class, defaults to 1 calendar day" - :class:`~pandas.tseries.offsets.BDay` or :class:`~pandas.tseries.offsets.BusinessDay`, ``'B'``,"business day (weekday)" - :class:`~pandas.tseries.offsets.CDay` or :class:`~pandas.tseries.offsets.CustomBusinessDay`, ``'C'``, "custom business day" - :class:`~pandas.tseries.offsets.Week`, ``'W'``, "one week, optionally anchored on a day of the week" - :class:`~pandas.tseries.offsets.WeekOfMonth`, ``'WOM'``, "the x-th day of the y-th week of each month" - :class:`~pandas.tseries.offsets.LastWeekOfMonth`, ``'LWOM'``, "the x-th day of the last week of each month" - :class:`~pandas.tseries.offsets.MonthEnd`, ``'M'``, "calendar month end" - :class:`~pandas.tseries.offsets.MonthBegin`, ``'MS'``, "calendar month begin" - :class:`~pandas.tseries.offsets.BMonthEnd` or :class:`~pandas.tseries.offsets.BusinessMonthEnd`, ``'BM'``, "business month end" - :class:`~pandas.tseries.offsets.BMonthBegin` or :class:`~pandas.tseries.offsets.BusinessMonthBegin`, ``'BMS'``, "business month begin" - :class:`~pandas.tseries.offsets.CBMonthEnd` or :class:`~pandas.tseries.offsets.CustomBusinessMonthEnd`, ``'CBM'``, "custom business month end" - :class:`~pandas.tseries.offsets.CBMonthBegin` or :class:`~pandas.tseries.offsets.CustomBusinessMonthBegin`, ``'CBMS'``, "custom business month begin" - :class:`~pandas.tseries.offsets.SemiMonthEnd`, ``'SM'``, "15th (or other day_of_month) and calendar month end" - :class:`~pandas.tseries.offsets.SemiMonthBegin`, ``'SMS'``, "15th (or other day_of_month) and calendar month begin" - :class:`~pandas.tseries.offsets.QuarterEnd`, ``'Q'``, "calendar quarter end" - :class:`~pandas.tseries.offsets.QuarterBegin`, ``'QS'``, "calendar quarter begin" - :class:`~pandas.tseries.offsets.BQuarterEnd`, ``'BQ``, "business quarter end" - :class:`~pandas.tseries.offsets.BQuarterBegin`, ``'BQS'``, "business quarter begin" - :class:`~pandas.tseries.offsets.FY5253Quarter`, ``'REQ'``, "retail (aka 52-53 week) quarter" - :class:`~pandas.tseries.offsets.YearEnd`, ``'A'``, "calendar year end" - :class:`~pandas.tseries.offsets.YearBegin`, ``'AS'`` or ``'BYS'``,"calendar year begin" - :class:`~pandas.tseries.offsets.BYearEnd`, ``'BA'``, "business year end" - :class:`~pandas.tseries.offsets.BYearBegin`, ``'BAS'``, "business year begin" - :class:`~pandas.tseries.offsets.FY5253`, ``'RE'``, "retail (aka 52-53 week) year" + :class:`~pandas.tseries.offsets.BDay` or :class:`~pandas.tseries.offsets.BusinessDay`, ``"B"``,"business day (weekday)" + :class:`~pandas.tseries.offsets.CDay` or :class:`~pandas.tseries.offsets.CustomBusinessDay`, ``"C"``, "custom business day" + :class:`~pandas.tseries.offsets.Week`, ``"W"``, "one week, optionally anchored on a day of the week" + :class:`~pandas.tseries.offsets.WeekOfMonth`, ``"WOM"``, "the x-th day of the y-th week of each month" + :class:`~pandas.tseries.offsets.LastWeekOfMonth`, ``"LWOM"``, "the x-th day of the last week of each month" + :class:`~pandas.tseries.offsets.MonthEnd`, ``"M"``, "calendar month end" + :class:`~pandas.tseries.offsets.MonthBegin`, ``"MS"``, "calendar month begin" + :class:`~pandas.tseries.offsets.BMonthEnd` or :class:`~pandas.tseries.offsets.BusinessMonthEnd`, ``"BM"``, "business month end" + :class:`~pandas.tseries.offsets.BMonthBegin` or :class:`~pandas.tseries.offsets.BusinessMonthBegin`, ``"BMS"``, "business month begin" + :class:`~pandas.tseries.offsets.CBMonthEnd` or 
:class:`~pandas.tseries.offsets.CustomBusinessMonthEnd`, ``"CBM"``, "custom business month end" + :class:`~pandas.tseries.offsets.CBMonthBegin` or :class:`~pandas.tseries.offsets.CustomBusinessMonthBegin`, ``"CBMS"``, "custom business month begin" + :class:`~pandas.tseries.offsets.SemiMonthEnd`, ``"SM"``, "15th (or other day_of_month) and calendar month end" + :class:`~pandas.tseries.offsets.SemiMonthBegin`, ``"SMS"``, "15th (or other day_of_month) and calendar month begin" + :class:`~pandas.tseries.offsets.QuarterEnd`, ``"Q"``, "calendar quarter end" + :class:`~pandas.tseries.offsets.QuarterBegin`, ``"QS"``, "calendar quarter begin" + :class:`~pandas.tseries.offsets.BQuarterEnd`, ``"BQ"``, "business quarter end" + :class:`~pandas.tseries.offsets.BQuarterBegin`, ``"BQS"``, "business quarter begin" + :class:`~pandas.tseries.offsets.FY5253Quarter`, ``"REQ"``, "retail (aka 52-53 week) quarter" + :class:`~pandas.tseries.offsets.YearEnd`, ``"A"``, "calendar year end" + :class:`~pandas.tseries.offsets.YearBegin`, ``"AS"`` or ``"BYS"``,"calendar year begin" + :class:`~pandas.tseries.offsets.BYearEnd`, ``"BA"``, "business year end" + :class:`~pandas.tseries.offsets.BYearBegin`, ``"BAS"``, "business year begin" + :class:`~pandas.tseries.offsets.FY5253`, ``"RE"``, "retail (aka 52-53 week) year" + :class:`~pandas.tseries.offsets.Easter`, None, "Easter holiday" + :class:`~pandas.tseries.offsets.BusinessHour`, ``"BH"``, "business hour" + :class:`~pandas.tseries.offsets.CustomBusinessHour`, ``"CBH"``, "custom business hour" + :class:`~pandas.tseries.offsets.Day`, ``"D"``, "one absolute day" + :class:`~pandas.tseries.offsets.Hour`, ``"H"``, "one hour" + :class:`~pandas.tseries.offsets.Minute`, ``"T"`` or ``"min"``,"one minute" + :class:`~pandas.tseries.offsets.Second`, ``"S"``, "one second" + :class:`~pandas.tseries.offsets.Milli`, ``"L"`` or ``"ms"``, "one millisecond" + :class:`~pandas.tseries.offsets.Micro`, ``"U"`` or ``"us"``, "one microsecond" + :class:`~pandas.tseries.offsets.Nano`, ``"N"``, "one nanosecond" ``DateOffsets`` additionally have :meth:`rollforward` and :meth:`rollback` methods for moving a date forward or backward respectively to a valid offset @@ -900,10 +900,10 @@ business offsets operate on the weekdays. .. ipython:: python - ts = pd.Timestamp('2018-01-06 00:00:00') + ts = pd.Timestamp("2018-01-06 00:00:00") ts.day_name() - # BusinessHour's valid offset dates are Monday through Friday - offset = pd.offsets.BusinessHour(start='09:00') + # BusinessHour's valid offset dates are Monday through Friday + offset = pd.offsets.BusinessHour(start="09:00") # Bring the date to the closest offset date (Monday) offset.rollforward(ts) # Date is brought to the closest offset date first and then the hour is added @@ -916,12 +916,12 @@ in the operation). ..
ipython:: python - ts = pd.Timestamp('2014-01-01 09:00') + ts = pd.Timestamp("2014-01-01 09:00") day = pd.offsets.Day() day.apply(ts) day.apply(ts).normalize() - ts = pd.Timestamp('2014-01-01 22:00') + ts = pd.Timestamp("2014-01-01 22:00") hour = pd.offsets.Hour() hour.apply(ts) hour.apply(ts).normalize() @@ -974,7 +974,7 @@ apply the offset to each element. .. ipython:: python - rng = pd.date_range('2012-01-01', '2012-01-03') + rng = pd.date_range("2012-01-01", "2012-01-03") s = pd.Series(rng) rng rng + pd.DateOffset(months=2) @@ -989,7 +989,7 @@ used exactly like a ``Timedelta`` - see the .. ipython:: python s - pd.offsets.Day(2) - td = s - pd.Series(pd.date_range('2011-12-29', '2011-12-31')) + td = s - pd.Series(pd.date_range("2011-12-29", "2011-12-31")) td td + pd.offsets.Minute(15) @@ -1012,31 +1012,31 @@ The ``CDay`` or ``CustomBusinessDay`` class provides a parametric ``BusinessDay`` class which can be used to create customized business day calendars which account for local holidays and local weekend conventions. -As an interesting example, let's look at Egypt where a Friday-Saturday weekend is observed. +As an interesting example, let's look at Egypt where a Friday-Saturday weekend is observed. .. ipython:: python - weekmask_egypt = 'Sun Mon Tue Wed Thu' + weekmask_egypt = "Sun Mon Tue Wed Thu" - # They also observe International Workers' Day so let's - # add that for a couple of years - holidays = ['2012-05-01', + # They also observe International Workers' Day so let's + # add that for a couple of years + holidays = ["2012-05-01", datetime.datetime(2013, 5, 1), - np.datetime64('2014-05-01')] + np.datetime64("2014-05-01")] bday_egypt = pd.offsets.CustomBusinessDay(holidays=holidays, weekmask=weekmask_egypt) dt = datetime.datetime(2013, 4, 30) dt + 2 * bday_egypt -Let's map to the weekday names: +Let's map to the weekday names: .. ipython:: python dts = pd.date_range(dt, periods=5, freq=bday_egypt) pd.Series(dts.weekday, dts).map( - pd.Series('Mon Tue Wed Thu Fri Sat Sun'.split())) + pd.Series("Mon Tue Wed Thu Fri Sat Sun".split())) Holiday calendars can be used to provide the list of holidays. See the :ref:`holiday calendar` section for more information. @@ -1050,7 +1050,7 @@ Holiday calendars can be used to provide the list of holidays. See the # Friday before MLK Day dt = datetime.datetime(2014, 1, 17) - # Tuesday after MLK Day (Monday is skipped because it's a holiday) + # Tuesday after MLK Day (Monday is skipped because it's a holiday) dt + bday_us Monthly offsets that respect a certain holiday calendar can be defined @@ -1066,15 +1066,15 @@ in the usual way. dt + bmth_us # Define date index with custom offset - pd.date_range(start='20100101', end='20120101', freq=bmth_us) + pd.date_range(start="20100101", end="20120101", freq=bmth_us) .. note:: - The frequency string 'C' is used to indicate that a CustomBusinessDay + The frequency string "C" is used to indicate that a CustomBusinessDay DateOffset is used. It is important to note that since CustomBusinessDay is a parameterised type, instances of CustomBusinessDay may differ and this is - not detectable from the 'C' frequency string. The user therefore needs to - ensure that the 'C' frequency string is used consistently within the user's + not detectable from the "C" frequency string. The user therefore needs to + ensure that the "C" frequency string is used consistently within the user's application. .. _timeseries.businesshour: @@ -1097,23 +1097,23 @@ hours are added to the next business day.
bh # 2014-08-01 is Friday - pd.Timestamp('2014-08-01 10:00').weekday() - pd.Timestamp('2014-08-01 10:00') + bh + pd.Timestamp("2014-08-01 10:00").weekday() + pd.Timestamp("2014-08-01 10:00") + bh - # Below example is the same as: pd.Timestamp('2014-08-01 09:00') + bh - pd.Timestamp('2014-08-01 08:00') + bh + # Below example is the same as: pd.Timestamp("2014-08-01 09:00") + bh + pd.Timestamp("2014-08-01 08:00") + bh # If the result is on the end time, move to the next business day - pd.Timestamp('2014-08-01 16:00') + bh + pd.Timestamp("2014-08-01 16:00") + bh # The remainder is added to the next day - pd.Timestamp('2014-08-01 16:30') + bh + pd.Timestamp("2014-08-01 16:30") + bh # Adding 2 business hours - pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(2) + pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(2) # Subtracting 3 business hours - pd.Timestamp('2014-08-01 10:00') + pd.offsets.BusinessHour(-3) + pd.Timestamp("2014-08-01 10:00") + pd.offsets.BusinessHour(-3) You can also specify ``start`` and ``end`` time by keywords. The argument must be a ``str`` with an ``hour:minute`` representation or a ``datetime.time`` @@ -1122,12 +1122,12 @@ results in ``ValueError``. .. ipython:: python - bh = pd.offsets.BusinessHour(start='11:00', end=datetime.time(20, 0)) + bh = pd.offsets.BusinessHour(start="11:00", end=datetime.time(20, 0)) bh - pd.Timestamp('2014-08-01 13:00') + bh - pd.Timestamp('2014-08-01 09:00') + bh - pd.Timestamp('2014-08-01 18:00') + bh + pd.Timestamp("2014-08-01 13:00") + bh + pd.Timestamp("2014-08-01 09:00") + bh + pd.Timestamp("2014-08-01 18:00") + bh Passing ``start`` time later than ``end`` represents midnight business hour. In this case, business hour exceeds midnight and overlap to the next day. Valid business hours are distinguished by whether it started from valid ``Busine .. ipython:: python - bh = pd.offsets.BusinessHour(start='17:00', end='09:00') + bh = pd.offsets.BusinessHour(start="17:00", end="09:00") bh - pd.Timestamp('2014-08-01 17:00') + bh - pd.Timestamp('2014-08-01 23:00') + bh + pd.Timestamp("2014-08-01 17:00") + bh + pd.Timestamp("2014-08-01 23:00") + bh # Although 2014-08-02 is Saturday, # it is valid because it starts from 08-01 (Friday). - pd.Timestamp('2014-08-02 04:00') + bh + pd.Timestamp("2014-08-02 04:00") + bh # Although 2014-08-04 is Monday, # it is out of business hours because it starts from 08-03 (Sunday). - pd.Timestamp('2014-08-04 04:00') + bh + pd.Timestamp("2014-08-04 04:00") + bh Applying ``BusinessHour.rollforward`` and ``rollback`` to out of business hours results in -the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward`` +the next business hour start or previous day's end. Different from other offsets, ``BusinessHour.rollforward`` may output different results from ``apply`` by definition. -This is because one day's business hour end is equal to next day's business hour start. For example, +This is because one day's business hour end is equal to next day's business hour start. For example, under the default business hours (9:00 - 17:00), there is no gap (0 minutes) between ``2014-08-01 17:00`` and ``2014-08-04 09:00``. ..
ipython:: python # This adjusts a Timestamp to business hour edge - pd.offsets.BusinessHour().rollback(pd.Timestamp('2014-08-02 15:00')) - pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02 15:00')) + pd.offsets.BusinessHour().rollback(pd.Timestamp("2014-08-02 15:00")) + pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02 15:00")) - # It is the same as BusinessHour().apply(pd.Timestamp('2014-08-01 17:00')). - # And it is the same as BusinessHour().apply(pd.Timestamp('2014-08-04 09:00')) - pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02 15:00')) + # It is the same as BusinessHour().apply(pd.Timestamp("2014-08-01 17:00")). + # And it is the same as BusinessHour().apply(pd.Timestamp("2014-08-04 09:00")) + pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02 15:00")) # BusinessDay results (for reference) - pd.offsets.BusinessHour().rollforward(pd.Timestamp('2014-08-02')) + pd.offsets.BusinessHour().rollforward(pd.Timestamp("2014-08-02")) - # It is the same as BusinessDay().apply(pd.Timestamp('2014-08-01')) + # It is the same as BusinessDay().apply(pd.Timestamp("2014-08-01")) # The result is the same as rollforward because BusinessDay never overlaps. - pd.offsets.BusinessHour().apply(pd.Timestamp('2014-08-02')) + pd.offsets.BusinessHour().apply(pd.Timestamp("2014-08-02")) ``BusinessHour`` regards Saturday and Sunday as holidays. To use arbitrary holidays, you can use ``CustomBusinessHour`` offset, as explained in the @@ -1196,17 +1196,17 @@ as ``BusinessHour`` except that it skips specified custom holidays. dt + bhour_us - # Tuesday after MLK Day (Monday is skipped because it's a holiday) + # Tuesday after MLK Day (Monday is skipped because it's a holiday) dt + bhour_us * 2 You can use keyword arguments supported by either ``BusinessHour`` and ``CustomBusinessDay``. .. ipython:: python - bhour_mon = pd.offsets.CustomBusinessHour(start='10:00', - weekmask='Tue Wed Thu Fri') + bhour_mon = pd.offsets.CustomBusinessHour(start="10:00", + weekmask="Tue Wed Thu Fri") - # Monday is skipped because it's a holiday, business hour starts from 10:00 + # Monday is skipped because it's a holiday, business hour starts from 10:00 dt + bhour_mon * 2 .. _timeseries.offset_aliases: @@ -1257,7 +1257,7 @@ most functions: .. ipython:: python - pd.date_range(start, periods=5, freq='B') + pd.date_range(start, periods=5, freq="B") pd.date_range(start, periods=5, freq=pd.offsets.BDay()) You can combine together day and intraday offsets: .. ipython:: python - pd.date_range(start, periods=10, freq='2h20min') + pd.date_range(start, periods=10, freq="2h20min") - pd.date_range(start, periods=10, freq='1D10U') + pd.date_range(start, periods=10, freq="1D10U") Anchored offsets ~~~~~~~~~~~~~~~~ For some frequencies you can specify an anchoring suffix: .. csv-table:: :header: "Alias", "Description" :widths: 15, 100 - "W\-SUN", "weekly frequency (Sundays). Same as 'W'" + "W\-SUN", "weekly frequency (Sundays). Same as 'W'" "W\-MON", "weekly frequency (Mondays)" "W\-TUE", "weekly frequency (Tuesdays)" "W\-WED", "weekly frequency (Wednesdays)" "W\-THU", "weekly frequency (Thursdays)" "W\-FRI", "weekly frequency (Fridays)" "W\-SAT", "weekly frequency (Saturdays)" - "(B)Q(S)\-DEC", "quarterly frequency, year ends in December. Same as 'Q'" + "(B)Q(S)\-DEC", "quarterly frequency, year ends in December.
Same as "Q"" "(B)Q(S)\-JAN", "quarterly frequency, year ends in January" "(B)Q(S)\-FEB", "quarterly frequency, year ends in February" "(B)Q(S)\-MAR", "quarterly frequency, year ends in March" @@ -1297,7 +1297,7 @@ For some frequencies you can specify an anchoring suffix: "(B)Q(S)\-SEP", "quarterly frequency, year ends in September" "(B)Q(S)\-OCT", "quarterly frequency, year ends in October" "(B)Q(S)\-NOV", "quarterly frequency, year ends in November" - "(B)A(S)\-DEC", "annual frequency, anchored end of December. Same as 'A'" + "(B)A(S)\-DEC", "annual frequency, anchored end of December. Same as "A"" "(B)A(S)\-JAN", "annual frequency, anchored end of January" "(B)A(S)\-FEB", "annual frequency, anchored end of February" "(B)A(S)\-MAR", "annual frequency, anchored end of March" @@ -1326,39 +1326,39 @@ anchor point, and moved ``|n|-1`` additional steps forwards or backwards. .. ipython:: python - pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=1) - pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=1) + pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=1) + pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=1) - pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=1) - pd.Timestamp('2014-01-02') - pd.offsets.MonthEnd(n=1) + pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=1) + pd.Timestamp("2014-01-02") - pd.offsets.MonthEnd(n=1) - pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=4) - pd.Timestamp('2014-01-02') - pd.offsets.MonthBegin(n=4) + pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=4) + pd.Timestamp("2014-01-02") - pd.offsets.MonthBegin(n=4) If the given date *is* on an anchor point, it is moved ``|n|`` points forwards or backwards. .. ipython:: python - pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=1) - pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=1) + pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=1) + pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=1) - pd.Timestamp('2014-01-01') - pd.offsets.MonthBegin(n=1) - pd.Timestamp('2014-01-31') - pd.offsets.MonthEnd(n=1) + pd.Timestamp("2014-01-01") - pd.offsets.MonthBegin(n=1) + pd.Timestamp("2014-01-31") - pd.offsets.MonthEnd(n=1) - pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=4) - pd.Timestamp('2014-01-31') - pd.offsets.MonthBegin(n=4) + pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=4) + pd.Timestamp("2014-01-31") - pd.offsets.MonthBegin(n=4) For the case when ``n=0``, the date is not moved if on an anchor point, otherwise it is rolled forward to the next anchor point. .. ipython:: python - pd.Timestamp('2014-01-02') + pd.offsets.MonthBegin(n=0) - pd.Timestamp('2014-01-02') + pd.offsets.MonthEnd(n=0) + pd.Timestamp("2014-01-02") + pd.offsets.MonthBegin(n=0) + pd.Timestamp("2014-01-02") + pd.offsets.MonthEnd(n=0) - pd.Timestamp('2014-01-01') + pd.offsets.MonthBegin(n=0) - pd.Timestamp('2014-01-31') + pd.offsets.MonthEnd(n=0) + pd.Timestamp("2014-01-01") + pd.offsets.MonthBegin(n=0) + pd.Timestamp("2014-01-31") + pd.offsets.MonthEnd(n=0) .. _timeseries.holiday: @@ -1399,8 +1399,8 @@ An example of how holidays and holiday calendars are defined: class ExampleCalendar(AbstractHolidayCalendar): rules = [ USMemorialDay, - Holiday('July 4th', month=7, day=4, observance=nearest_workday), - Holiday('Columbus Day', month=10, day=1, + Holiday("July 4th", month=7, day=4, observance=nearest_workday), + Holiday("Columbus Day", month=10, day=1, offset=pd.DateOffset(weekday=MO(2)))] cal = ExampleCalendar() @@ -1417,7 +1417,7 @@ or ``Timestamp`` objects. .. 
ipython:: python - pd.date_range(start='7/1/2012', end='7/10/2012', + pd.date_range(start="7/1/2012", end="7/10/2012", freq=pd.offsets.CDay(calendar=cal)).to_pydatetime() offset = pd.offsets.CustomBusinessDay(calendar=cal) datetime.datetime(2012, 5, 25) + offset @@ -1452,9 +1452,9 @@ or calendars with additional rules. from pandas.tseries.holiday import get_calendar, HolidayCalendarFactory,\ USLaborDay - cal = get_calendar('ExampleCalendar') + cal = get_calendar("ExampleCalendar") cal.rules - new_cal = HolidayCalendarFactory('NewExampleCalendar', cal, USLaborDay) + new_cal = HolidayCalendarFactory("NewExampleCalendar", cal, USLaborDay) new_cal.rules .. _timeseries.advanced_datetime: @@ -1484,9 +1484,9 @@ rather than changing the alignment of the data and the index: .. ipython:: python - ts.shift(5, freq='D') + ts.shift(5, freq="D") ts.shift(5, freq=pd.offsets.BDay()) - ts.shift(5, freq='BM') + ts.shift(5, freq="BM") Note that when ``freq`` is specified, the leading entry is no longer NaN because the data is not being realigned. @@ -1501,7 +1501,7 @@ calls ``reindex``. .. ipython:: python - dr = pd.date_range('1/1/2010', periods=3, freq=3 * pd.offsets.BDay()) + dr = pd.date_range("1/1/2010", periods=3, freq=3 * pd.offsets.BDay()) ts = pd.Series(np.random.randn(3), index=dr) ts ts.asfreq(pd.offsets.BDay()) method for any gaps that may appear after the frequency conversion. .. ipython:: python - ts.asfreq(pd.offsets.BDay(), method='pad') + ts.asfreq(pd.offsets.BDay(), method="pad") Filling forward / backward ~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1552,11 +1552,11 @@ Basics .. ipython:: python - rng = pd.date_range('1/1/2012', periods=100, freq='S') + rng = pd.date_range("1/1/2012", periods=100, freq="S") ts = pd.Series(np.random.randint(0, 500, len(rng)), index=rng) - ts.resample('5Min').sum() + ts.resample("5Min").sum() The ``resample`` function is very flexible and allows you to specify many different parameters to control the frequency conversion and resampling operation. Any function available via :ref:`dispatching ` is available as a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``, @@ -1568,21 +1568,21 @@ a method of the returned object, including ``sum``, ``mean``, ``std``, ``sem``, .. ipython:: python - ts.resample('5Min').mean() + ts.resample("5Min").mean() - ts.resample('5Min').ohlc() + ts.resample("5Min").ohlc() - ts.resample('5Min').max() + ts.resample("5Min").max() -For downsampling, ``closed`` can be set to 'left' or 'right' to specify which +For downsampling, ``closed`` can be set to "left" or "right" to specify which end of the interval is closed: .. ipython:: python - ts.resample('5Min', closed='right').mean() + ts.resample("5Min", closed="right").mean() - ts.resample('5Min', closed='left').mean() + ts.resample("5Min", closed="left").mean() Parameters like ``label`` are used to manipulate the resulting labels. ``label`` specifies whether the result is labeled with the beginning or the end of the interval. .. ipython:: python - ts.resample('5Min').mean() # by default label='left' + ts.resample("5Min").mean() # by default label="left" - ts.resample('5Min', label='left').mean() + ts.resample("5Min", label="left").mean() .. warning:: - The default values for ``label`` and ``closed`` is '**left**' for all - frequency offsets except for 'M', 'A', 'Q', 'BM', 'BA', 'BQ', and 'W' - which all have a default of 'right'. + The default values for ``label`` and ``closed`` are "**left**" for all + frequency offsets except for "M", "A", "Q", "BM", "BA", "BQ", and "W", + which all have a default of "right".
This might unintentionally lead to looking ahead, where the value for a later time is pulled back to a previous time as in the following example with @@ -1606,12 +1606,12 @@ the end of the interval. .. ipython:: python - s = pd.date_range('2000-01-01', '2000-01-05').to_series() + s = pd.date_range("2000-01-01", "2000-01-05").to_series() s.iloc[2] = pd.NaT s.dt.day_name() - # default: label='left', closed='left' - s.resample('B').last().dt.day_name() + # default: label="left", closed="left" + s.resample("B").last().dt.day_name() Notice how the value for Sunday got pulled back to the previous Friday. To get the behavior where the value for Sunday is pushed to Monday, use @@ -1619,16 +1619,16 @@ the end of the interval. .. ipython:: python - s.resample('B', label='right', closed='right').last().dt.day_name() + s.resample("B", label="right", closed="right").last().dt.day_name() The ``axis`` parameter can be set to 0 or 1 and allows you to resample the specified axis for a ``DataFrame``. -``kind`` can be set to 'timestamp' or 'period' to convert the resulting index +``kind`` can be set to "timestamp" or "period" to convert the resulting index to/from timestamp and time span representations. By default ``resample`` retains the input representation. -``convention`` can be set to 'start' or 'end' when resampling period data +``convention`` can be set to "start" or "end" when resampling period data (detail below). It specifies how low frequency periods are converted to higher frequency periods. @@ -1642,18 +1642,18 @@ For upsampling, you can specify a way to upsample and the ``limit`` parameter to # from secondly to every 250 milliseconds - ts[:2].resample('250L').asfreq() + ts[:2].resample("250L").asfreq() - ts[:2].resample('250L').ffill() + ts[:2].resample("250L").ffill() - ts[:2].resample('250L').ffill(limit=2) + ts[:2].resample("250L").ffill(limit=2) Sparse resampling ~~~~~~~~~~~~~~~~~ Sparse timeseries are the ones where you have a lot fewer points relative to the amount of time you are looking to resample. Naively upsampling a sparse -series can potentially generate lots of intermediate values. When you don't want +series can potentially generate lots of intermediate values. When you don't want to use a method to fill these values, e.g. ``fill_method`` is ``None``, then intermediate values will be filled with ``NaN``. Since ``resample`` is a time-based groupby, the following is a method to efficiently resample only the groups that are not all ``NaN``. .. ipython:: python - rng = pd.date_range('2014-1-1', periods=100, freq='D') + pd.Timedelta('1s') + rng = pd.date_range("2014-1-1", periods=100, freq="D") + pd.Timedelta("1s") ts = pd.Series(range(100), index=rng) If we want to resample to the full range of the series: .. ipython:: python - ts.resample('3T').sum() + ts.resample("3T").sum() We can instead only resample those groups where we have points as follows: .. ipython:: python from functools import partial from pandas.tseries.frequencies import to_offset def round(t, freq): freq = to_offset(freq) return pd.Timestamp((t.value // freq.delta.value) * freq.delta.value) - ts.groupby(partial(round, freq='3T')).sum() + ts.groupby(partial(round, freq="3T")).sum() .. _timeseries.aggregate: @@ -1698,24 +1698,24 @@ Resampling a ``DataFrame``, the default will be to act on all columns with the s ..
ipython:: python df = pd.DataFrame(np.random.randn(1000, 3), - index=pd.date_range('1/1/2012', freq='S', periods=1000), - columns=['A', 'B', 'C']) - r = df.resample('3T') + index=pd.date_range("1/1/2012", freq="S", periods=1000), + columns=["A", "B", "C"]) + r = df.resample("3T") r.mean() We can select a specific column or columns using standard getitem. .. ipython:: python - r['A'].mean() + r["A"].mean() - r[['A', 'B']].mean() + r[["A", "B"]].mean() You can pass a list or dict of functions to do aggregation with, outputting a ``DataFrame``: .. ipython:: python - r['A'].agg([np.sum, np.mean, np.std]) + r["A"].agg([np.sum, np.mean, np.std]) On a resampled ``DataFrame``, you can pass a list of functions to apply to each column, which produces an aggregated result with a hierarchical index: @@ -1730,21 +1730,21 @@ columns of a ``DataFrame``: .. ipython:: python :okexcept: - r.agg({'A': np.sum, - 'B': lambda x: np.std(x, ddof=1)}) + r.agg({"A": np.sum, + "B": lambda x: np.std(x, ddof=1)}) The function names can also be strings. In order for a string to be valid it must be implemented on the resampled object: .. ipython:: python - r.agg({'A': 'sum', 'B': 'std'}) + r.agg({"A": "sum", "B": "std"}) Furthermore, you can also specify multiple aggregation functions for each column separately. .. ipython:: python - r.agg({'A': ['sum', 'std'], 'B': ['mean', 'std']}) + r.agg({"A": ["sum", "std"], "B": ["mean", "std"]}) If a ``DataFrame`` does not have a datetimelike index, but instead you want @@ -1753,14 +1753,14 @@ to resample based on datetimelike column in the frame, it can passed to the .. ipython:: python - df = pd.DataFrame({'date': pd.date_range('2015-01-01', freq='W', periods=5), - 'a': np.arange(5)}, + df = pd.DataFrame({"date": pd.date_range("2015-01-01", freq="W", periods=5), + "a": np.arange(5)}, index=pd.MultiIndex.from_arrays([ [1, 2, 3, 4, 5], - pd.date_range('2015-01-01', freq='W', periods=5)], - names=['v', 'd'])) + pd.date_range("2015-01-01", freq="W", periods=5)], + names=["v", "d"])) df - df.resample('M', on='date').sum() + df.resample("M", on="date").sum() Similarly, if you instead want to resample by a datetimelike level of ``MultiIndex``, its name or location can be passed to the @@ -1768,7 +1768,7 @@ level of ``MultiIndex``, its name or location can be passed to the .. ipython:: python - df.resample('M', level='d').sum() + df.resample("M", level="d").sum() .. _timeseries.iterating-label: @@ -1782,14 +1782,14 @@ natural and functions similarly to :py:func:`itertools.groupby`: small = pd.Series( range(6), - index=pd.to_datetime(['2017-01-01T00:00:00', - '2017-01-01T00:30:00', - '2017-01-01T00:31:00', - '2017-01-01T01:00:00', - '2017-01-01T03:00:00', - '2017-01-01T03:05:00']) + index=pd.to_datetime(["2017-01-01T00:00:00", + "2017-01-01T00:30:00", + "2017-01-01T00:31:00", + "2017-01-01T01:00:00", + "2017-01-01T03:00:00", + "2017-01-01T03:05:00"]) ) - resampled = small.resample('H') + resampled = small.resample("H") for name, group in resampled: print("Group: ", name) @@ -1811,45 +1811,45 @@ For example: .. 
ipython:: python - start, end = '2000-10-01 23:30:00', '2000-10-02 00:30:00' - middle = '2000-10-02 00:00:00' - rng = pd.date_range(start, end, freq='7min') + start, end = "2000-10-01 23:30:00", "2000-10-02 00:30:00" + middle = "2000-10-02 00:00:00" + rng = pd.date_range(start, end, freq="7min") ts = pd.Series(np.arange(len(rng)) * 3, index=rng) ts -Here we can see that, when using ``origin`` with its default value (``'start_day'``), the result after ``'2000-10-02 00:00:00'`` are not identical depending on the start of time series: +Here we can see that, when using ``origin`` with its default value (``"start_day"``), the results after ``"2000-10-02 00:00:00"`` differ depending on the start of the time series: .. ipython:: python - ts.resample('17min', origin='start_day').sum() - ts[middle:end].resample('17min', origin='start_day').sum() + ts.resample("17min", origin="start_day").sum() + ts[middle:end].resample("17min", origin="start_day").sum() -Here we can see that, when setting ``origin`` to ``'epoch'``, the result after ``'2000-10-02 00:00:00'`` are identical depending on the start of time series: +Here we can see that, when setting ``origin`` to ``"epoch"``, the results after ``"2000-10-02 00:00:00"`` are identical regardless of the start of the time series: .. ipython:: python - ts.resample('17min', origin='epoch').sum() - ts[middle:end].resample('17min', origin='epoch').sum() + ts.resample("17min", origin="epoch").sum() + ts[middle:end].resample("17min", origin="epoch").sum() If needed you can use a custom timestamp for ``origin``: .. ipython:: python - ts.resample('17min', origin='2001-01-01').sum() - ts[middle:end].resample('17min', origin=pd.Timestamp('2001-01-01')).sum() + ts.resample("17min", origin="2001-01-01").sum() + ts[middle:end].resample("17min", origin=pd.Timestamp("2001-01-01")).sum() If needed you can just adjust the bins with an ``offset`` Timedelta that would be added to the default ``origin``. Those two examples are equivalent for this time series: .. ipython:: python - ts.resample('17min', origin='start').sum() - ts.resample('17min', offset='23h30min').sum() + ts.resample("17min", origin="start").sum() + ts.resample("17min", offset="23h30min").sum() -Note the use of ``'start'`` for ``origin`` on the last example. In that case, ``origin`` will be set to the first value of the timeseries. +Note the use of ``"start"`` for ``origin`` in the last example. In that case, ``origin`` will be set to the first value of the time series. .. _timeseries.periods: @@ -1869,37 +1869,37 @@ Because ``freq`` represents a span of ``Period``, it cannot be negative like "-3 .. ipython:: python - pd.Period('2012', freq='A-DEC') + pd.Period("2012", freq="A-DEC") - pd.Period('2012-1-1', freq='D') + pd.Period("2012-1-1", freq="D") - pd.Period('2012-1-1 19:00', freq='H') + pd.Period("2012-1-1 19:00", freq="H") - pd.Period('2012-1-1 19:00', freq='5H') + pd.Period("2012-1-1 19:00", freq="5H") Adding and subtracting integers from periods shifts the period by its own frequency. Arithmetic is not allowed between ``Period`` with different ``freq`` (span). ..
ipython:: python - p = pd.Period('2012', freq='A-DEC') + p = pd.Period("2012", freq="A-DEC") p + 1 p - 3 - p = pd.Period('2012-01', freq='2M') + p = pd.Period("2012-01", freq="2M") p + 2 p - 1 @okexcept - p == pd.Period('2012-01', freq='3M') + p == pd.Period("2012-01", freq="3M") If ``Period`` freq is daily or higher (``D``, ``H``, ``T``, ``S``, ``L``, ``U``, ``N``), ``offsets`` and ``timedelta``-like can be added if the result can have the same freq. Otherwise, ``ValueError`` will be raised. .. ipython:: python - p = pd.Period('2014-07-01 09:00', freq='H') + p = pd.Period("2014-07-01 09:00", freq="H") p + pd.offsets.Hour(2) p + datetime.timedelta(minutes=120) - p + np.timedelta64(7200, 's') + p + np.timedelta64(7200, "s") .. code-block:: ipython @@ -1912,7 +1912,7 @@ If ``Period`` has other frequencies, only the same ``offsets`` can be added. Oth .. ipython:: python - p = pd.Period('2014-07', freq='M') + p = pd.Period("2014-07", freq="M") p + pd.offsets.MonthEnd(3) .. code-block:: ipython @@ -1927,7 +1927,7 @@ return the number of frequency units between them: .. ipython:: python - pd.Period('2012', freq='A-DEC') - pd.Period('2002', freq='A-DEC') + pd.Period("2012", freq="A-DEC") - pd.Period("2002", freq="A-DEC") PeriodIndex and period_range ~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -1936,21 +1936,21 @@ which can be constructed using the ``period_range`` convenience function: .. ipython:: python - prng = pd.period_range('1/1/2011', '1/1/2012', freq='M') + prng = pd.period_range("1/1/2011", "1/1/2012", freq="M") prng The ``PeriodIndex`` constructor can also be used directly: .. ipython:: python - pd.PeriodIndex(['2011-1', '2011-2', '2011-3'], freq='M') + pd.PeriodIndex(["2011-1", "2011-2", "2011-3"], freq="M") Passing multiplied frequency outputs a sequence of ``Period`` which has multiplied span. .. ipython:: python - pd.period_range(start='2014-01', freq='3M', periods=4) + pd.period_range(start="2014-01", freq="3M", periods=4) If ``start`` or ``end`` are ``Period`` objects, they will be used as anchor endpoints for a ``PeriodIndex`` with frequency matching that of the @@ -1958,8 +1958,8 @@ endpoints for a ``PeriodIndex`` with frequency matching that of the .. ipython:: python - pd.period_range(start=pd.Period('2017Q1', freq='Q'), - end=pd.Period('2017Q2', freq='Q'), freq='M') + pd.period_range(start=pd.Period("2017Q1", freq="Q"), + end=pd.Period("2017Q2", freq="Q"), freq="M") Just like ``DatetimeIndex``, a ``PeriodIndex`` can also be used to index pandas objects: @@ -1973,11 +1973,11 @@ objects: .. ipython:: python - idx = pd.period_range('2014-07-01 09:00', periods=5, freq='H') + idx = pd.period_range("2014-07-01 09:00", periods=5, freq="H") idx idx + pd.offsets.Hour(2) - idx = pd.period_range('2014-07', periods=5, freq='M') + idx = pd.period_range("2014-07", periods=5, freq="M") idx idx + pd.offsets.MonthEnd(3) @@ -1996,7 +1996,7 @@ The ``period`` dtype holds the ``freq`` attribute and is represented with .. ipython:: python - pi = pd.period_range('2016-01-01', periods=3, freq='M') + pi = pd.period_range("2016-01-01", periods=3, freq="M") pi pi.dtype @@ -2007,15 +2007,15 @@ The ``period`` dtype can be used in ``.astype(...)``. It allows one to change th .. 
ipython:: python # change monthly freq to daily freq - pi.astype('period[D]') + pi.astype("period[D]") # convert to DatetimeIndex - pi.astype('datetime64[ns]') + pi.astype("datetime64[ns]") # convert to PeriodIndex - dti = pd.date_range('2011-01-01', freq='M', periods=3) + dti = pd.date_range("2011-01-01", freq="M", periods=3) dti - dti.astype('period[M]') + dti.astype("period[M]") PeriodIndex partial string indexing ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI @@ -2029,42 +2029,42 @@ You can pass in dates and strings to ``Series`` and ``DataFrame`` with ``PeriodI .. ipython:: python - ps['2011-01'] + ps["2011-01"] ps[datetime.datetime(2011, 12, 25):] - ps['10/31/2011':'12/31/2011'] + ps["10/31/2011":"12/31/2011"] Passing a string representing a lower frequency than ``PeriodIndex`` returns partial sliced data. .. ipython:: python :okwarning: - ps['2011'] + ps["2011"] dfp = pd.DataFrame(np.random.randn(600, 1), - columns=['A'], - index=pd.period_range('2013-01-01 9:00', + columns=["A"], + index=pd.period_range("2013-01-01 9:00", periods=600, - freq='T')) + freq="T")) dfp - dfp['2013-01-01 10H'] + dfp["2013-01-01 10H"] As with ``DatetimeIndex``, the endpoints will be included in the result. The example below slices data starting from 10:00 to 11:59. .. ipython:: python - dfp['2013-01-01 10H':'2013-01-01 11H'] + dfp["2013-01-01 10H":"2013-01-01 11H"] Frequency conversion and resampling with PeriodIndex ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ The frequency of ``Period`` and ``PeriodIndex`` can be converted via the ``asfreq`` -method. Let's start with the fiscal year 2011, ending in December: +method. Let's start with the fiscal year 2011, ending in December: .. ipython:: python - p = pd.Period('2011', freq='A-DEC') + p = pd.Period("2011", freq="A-DEC") p We can convert it to a monthly frequency. Using the ``how`` parameter, we can specify whether to return the starting or ending month: .. ipython:: python - p.asfreq('M', how='start') + p.asfreq("M", how="start") - p.asfreq('M', how='end') + p.asfreq("M", how="end") -The shorthands 's' and 'e' are provided for convenience: +The shorthands "s" and "e" are provided for convenience: .. ipython:: python - p.asfreq('M', 's') - p.asfreq('M', 'e') + p.asfreq("M", "s") + p.asfreq("M", "e") Converting to a "super-period" (e.g., annual frequency is a super-period of quarterly frequency) automatically returns the super-period that includes the input period: .. ipython:: python - p = pd.Period('2011-12', freq='M') + p = pd.Period("2011-12", freq="M") - p.asfreq('A-NOV') + p.asfreq("A-NOV") Note that since we converted to an annual frequency that ends the year in November, the monthly period of December 2011 is actually in the 2012 A-NOV @@ -2110,21 +2110,21 @@ frequencies ``Q-JAN`` through ``Q-DEC``. .. ipython:: python - p = pd.Period('2012Q1', freq='Q-DEC') + p = pd.Period("2012Q1", freq="Q-DEC") - p.asfreq('D', 's') + p.asfreq("D", "s") - p.asfreq('D', 'e') + p.asfreq("D", "e") ``Q-MAR`` defines fiscal year end in March: .. ipython:: python - p = pd.Period('2011Q4', freq='Q-MAR') + p = pd.Period("2011Q4", freq="Q-MAR") - p.asfreq('D', 's') + p.asfreq("D", "s") - p.asfreq('D', 'e') + p.asfreq("D", "e") .. _timeseries.interchange: @@ -2136,7 +2136,7 @@ and vice-versa using ``to_timestamp``: ..
ipython:: python - rng = pd.date_range('1/1/2012', periods=5, freq='M') + rng = pd.date_range("1/1/2012", periods=5, freq="M") ts = pd.Series(np.random.randn(len(rng)), index=rng) ts ps = ts.to_period() ps ps.to_timestamp() -Remember that 's' and 'e' can be used to return the timestamps at the start or +Remember that "s" and "e" can be used to return the timestamps at the start or end of the period: .. ipython:: python - ps.to_timestamp('D', how='s') + ps.to_timestamp("D", how="s") Converting between period and timestamp enables some convenient arithmetic functions to be used. In the following example, we convert a quarterly frequency with year ending in November to 9am of the end of the month following the quarter end: .. ipython:: python - prng = pd.period_range('1990Q1', '2000Q4', freq='Q-NOV') + prng = pd.period_range("1990Q1", "2000Q4", freq="Q-NOV") ts = pd.Series(np.random.randn(len(prng)), prng) - ts.index = (prng.asfreq('M', 'e') + 1).asfreq('H', 's') + 9 + ts.index = (prng.asfreq("M", "e") + 1).asfreq("H", "s") + 9 ts.head() @@ -2180,7 +2180,7 @@ then you can use a ``PeriodIndex`` and/or ``Series`` of ``Periods`` to do comput .. ipython:: python - span = pd.period_range('1215-01-01', '1381-01-01', freq='D') + span = pd.period_range("1215-01-01", "1381-01-01", freq="D") span To convert from an ``int64`` based YYYYMMDD representation. .. ipython:: python s = pd.Series([20121231, 20141101, 20120415]) s def conv(x): return pd.Period(year=x // 10000, month=x // 100 % 100, - day=x % 100, freq='D') + day=x % 100, freq="D") s.apply(conv) s.apply(conv)[2] @@ -2221,7 +2221,7 @@ By default, pandas objects are time zone unaware: .. ipython:: python - rng = pd.date_range('3/6/2012 00:00', periods=15, freq='D') + rng = pd.date_range("3/6/2012 00:00", periods=15, freq="D") rng.tz is None To localize these dates to a time zone (assign a particular time zone to a naive date), you can use the ``tz_localize`` method or the ``tz`` keyword argument in @@ -2233,7 +2233,7 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string * In ``pytz`` you can find a list of common (and less common) time zones using ``from pytz import common_timezones, all_timezones``. -* ``dateutil`` uses the OS time zones so there isn't a fixed list available. For +* ``dateutil`` uses the OS time zones so there isn't a fixed list available. For common zones, the names are the same as ``pytz``. .. ipython:: python import dateutil # pytz - rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D', - tz='Europe/London') + rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D", + tz="Europe/London") rng_pytz.tz # dateutil - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D') - rng_dateutil = rng_dateutil.tz_localize('dateutil/Europe/London') + rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D") + rng_dateutil = rng_dateutil.tz_localize("dateutil/Europe/London") rng_dateutil.tz # dateutil - utc special case - rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', + rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=dateutil.tz.tzutc()) rng_utc.tz @@ -2260,7 +2260,7 @@ To return ``dateutil`` time zone objects, append ``dateutil/`` before the string .. ipython:: python # datetime.timezone - rng_utc = pd.date_range('3/6/2012 00:00', periods=3, freq='D', + rng_utc = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=datetime.timezone.utc) rng_utc.tz @@ -2273,14 +2273,14 @@ zones objects explicitly first.
import pytz # pytz - tz_pytz = pytz.timezone('Europe/London') - rng_pytz = pd.date_range('3/6/2012 00:00', periods=3, freq='D') + tz_pytz = pytz.timezone("Europe/London") + rng_pytz = pd.date_range("3/6/2012 00:00", periods=3, freq="D") rng_pytz = rng_pytz.tz_localize(tz_pytz) rng_pytz.tz == tz_pytz # dateutil - tz_dateutil = dateutil.tz.gettz('Europe/London') - rng_dateutil = pd.date_range('3/6/2012 00:00', periods=3, freq='D', + tz_dateutil = dateutil.tz.gettz("Europe/London") + rng_dateutil = pd.date_range("3/6/2012 00:00", periods=3, freq="D", tz=tz_dateutil) rng_dateutil.tz == tz_dateutil @@ -2289,7 +2289,7 @@ you can use the ``tz_convert`` method. .. ipython:: python - rng_pytz.tz_convert('US/Eastern') + rng_pytz.tz_convert("US/Eastern") .. note:: @@ -2301,16 +2301,16 @@ you can use the ``tz_convert`` method. .. ipython:: python - dti = pd.date_range('2019-01-01', periods=3, freq='D', tz='US/Pacific') + dti = pd.date_range("2019-01-01", periods=3, freq="D", tz="US/Pacific") dti.tz - ts = pd.Timestamp('2019-01-01', tz='US/Pacific') + ts = pd.Timestamp("2019-01-01", tz="US/Pacific") ts.tz .. warning:: Be wary of conversions between libraries. For some time zones, ``pytz`` and ``dateutil`` have different definitions of the zone. This is more of a problem for unusual time zones than for - 'standard' zones like ``US/Eastern``. + "standard" zones like ``US/Eastern``. .. warning:: @@ -2323,14 +2323,14 @@ you can use the ``tz_convert`` method. For ``pytz`` time zones, it is incorrect to pass a time zone object directly into the ``datetime.datetime`` constructor - (e.g., ``datetime.datetime(2011, 1, 1, tz=pytz.timezone('US/Eastern'))``. + (e.g., ``datetime.datetime(2011, 1, 1, tz=pytz.timezone("US/Eastern"))``). Instead, the datetime needs to be localized using the ``localize`` method on the ``pytz`` time zone object. .. warning:: Be aware that for times in the future, correct conversion between time zones - (and UTC) cannot be guaranteed by any time zone library because a timezone's + (and UTC) cannot be guaranteed by any time zone library because a timezone's offset from UTC may be changed by the respective government. .. warning:: @@ -2344,11 +2344,11 @@ you can use the ``tz_convert`` method. .. ipython:: python - d_2037 = '2037-03-31T010101' - d_2038 = '2038-03-31T010101' - DST = 'Europe/London' - assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz='GMT') - assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz='GMT') + d_2037 = "2037-03-31T010101" + d_2038 = "2038-03-31T010101" + DST = "Europe/London" + assert pd.Timestamp(d_2037, tz=DST) != pd.Timestamp(d_2037, tz="GMT") + assert pd.Timestamp(d_2038, tz=DST) == pd.Timestamp(d_2038, tz="GMT") Under the hood, all timestamps are stored in UTC. Values from a time zone aware :class:`DatetimeIndex` or :class:`Timestamp` will have their fields (day, hour, minute, etc.) @@ -2357,8 +2357,8 @@ still considered to be equal even if they are in different time zones: .. ipython:: python - rng_eastern = rng_utc.tz_convert('US/Eastern') - rng_berlin = rng_utc.tz_convert('Europe/Berlin') + rng_eastern = rng_utc.tz_convert("US/Eastern") + rng_berlin = rng_utc.tz_convert("Europe/Berlin") rng_eastern[2] rng_berlin[2] @@ -2369,9 +2369,9 @@ Operations between :class:`Series` in different time zones will yield UTC ..
ipython:: python - ts_utc = pd.Series(range(3), pd.date_range('20130101', periods=3, tz='UTC')) - eastern = ts_utc.tz_convert('US/Eastern') - berlin = ts_utc.tz_convert('Europe/Berlin') + ts_utc = pd.Series(range(3), pd.date_range("20130101", periods=3, tz="UTC")) + eastern = ts_utc.tz_convert("US/Eastern") + berlin = ts_utc.tz_convert("Europe/Berlin") result = eastern + berlin result result.index @@ -2382,14 +2382,14 @@ To remove time zone information, use ``tz_localize(None)`` or ``tz_convert(None) .. ipython:: python - didx = pd.date_range(start='2014-08-01 09:00', freq='H', - periods=3, tz='US/Eastern') + didx = pd.date_range(start="2014-08-01 09:00", freq="H", + periods=3, tz="US/Eastern") didx didx.tz_localize(None) didx.tz_convert(None) - # tz_convert(None) is identical to tz_convert('UTC').tz_localize(None) - didx.tz_convert('UTC').tz_localize(None) + # tz_convert(None) is identical to tz_convert("UTC").tz_localize(None) + didx.tz_convert("UTC").tz_localize(None) .. _timeseries.fold: @@ -2416,9 +2416,9 @@ control over how they are handled. .. ipython:: python pd.Timestamp(datetime.datetime(2019, 10, 27, 1, 30, 0, 0), - tz='dateutil/Europe/London', fold=0) + tz="dateutil/Europe/London", fold=0) pd.Timestamp(year=2019, month=10, day=27, hour=1, minute=30, - tz='dateutil/Europe/London', fold=1) + tz="dateutil/Europe/London", fold=1) .. _timeseries.timezone_ambiguous: @@ -2429,30 +2429,30 @@ Ambiguous times when localizing because daylight saving time (DST) in a local time zone causes some times to occur twice within one day ("clocks fall back"). The following options are available: -* ``'raise'``: Raises a ``pytz.AmbiguousTimeError`` (the default behavior) -* ``'infer'``: Attempt to determine the correct offset base on the monotonicity of the timestamps -* ``'NaT'``: Replaces ambiguous times with ``NaT`` +* ``"raise"``: Raises a ``pytz.AmbiguousTimeError`` (the default behavior) +* ``"infer"``: Attempts to determine the correct offset based on the monotonicity of the timestamps +* ``"NaT"``: Replaces ambiguous times with ``NaT`` * ``bool``: ``True`` represents a DST time, ``False`` represents non-DST time. An array-like of ``bool`` values is supported for a sequence of times. .. ipython:: python - rng_hourly = pd.DatetimeIndex(['11/06/2011 00:00', '11/06/2011 01:00', - '11/06/2011 01:00', '11/06/2011 02:00']) + rng_hourly = pd.DatetimeIndex(["11/06/2011 00:00", "11/06/2011 01:00", + "11/06/2011 01:00", "11/06/2011 02:00"]) -This will fail as there are ambiguous times (``'11/06/2011 01:00'``) +This will fail as there are ambiguous times (``"11/06/2011 01:00"``) .. code-block:: ipython - In [2]: rng_hourly.tz_localize('US/Eastern') - AmbiguousTimeError: Cannot infer dst time from Timestamp('2011-11-06 01:00:00'), try using the 'ambiguous' argument + In [2]: rng_hourly.tz_localize("US/Eastern") + AmbiguousTimeError: Cannot infer dst time from Timestamp('2011-11-06 01:00:00'), try using the 'ambiguous' argument Handle these ambiguous times by specifying the following. .. ipython:: python - rng_hourly.tz_localize('US/Eastern', ambiguous='infer') - rng_hourly.tz_localize('US/Eastern', ambiguous='NaT') - rng_hourly.tz_localize('US/Eastern', ambiguous=[True, True, False, False]) + rng_hourly.tz_localize("US/Eastern", ambiguous="infer") + rng_hourly.tz_localize("US/Eastern", ambiguous="NaT") + rng_hourly.tz_localize("US/Eastern", ambiguous=[True, True, False, False]) ..
 .. _timeseries.timezone_nonexistent:

@@ -2463,22 +2463,22 @@ A DST transition may also shift the local time ahead by 1 hour creating nonexistent
 local times ("clocks spring forward"). The behavior of localizing a timeseries with nonexistent times
 can be controlled by the ``nonexistent`` argument. The following options are available:

-* ``'raise'``: Raises a ``pytz.NonExistentTimeError`` (the default behavior)
-* ``'NaT'``: Replaces nonexistent times with ``NaT``
-* ``'shift_forward'``: Shifts nonexistent times forward to the closest real time
-* ``'shift_backward'``: Shifts nonexistent times backward to the closest real time
+* ``"raise"``: Raises a ``pytz.NonExistentTimeError`` (the default behavior)
+* ``"NaT"``: Replaces nonexistent times with ``NaT``
+* ``"shift_forward"``: Shifts nonexistent times forward to the closest real time
+* ``"shift_backward"``: Shifts nonexistent times backward to the closest real time
 * timedelta object: Shifts nonexistent times by the timedelta duration

 .. ipython:: python

-    dti = pd.date_range(start='2015-03-29 02:30:00', periods=3, freq='H')
+    dti = pd.date_range(start="2015-03-29 02:30:00", periods=3, freq="H")
     # 2:30 is a nonexistent time

 Localization of nonexistent times will raise an error by default.

 .. code-block:: ipython

-   In [2]: dti.tz_localize('Europe/Warsaw')
+   In [2]: dti.tz_localize("Europe/Warsaw")
    NonExistentTimeError: 2015-03-29 02:30:00

 Transform nonexistent times to ``NaT`` or shift the times.

 .. ipython:: python

     dti
-    dti.tz_localize('Europe/Warsaw', nonexistent='shift_forward')
-    dti.tz_localize('Europe/Warsaw', nonexistent='shift_backward')
-    dti.tz_localize('Europe/Warsaw', nonexistent=pd.Timedelta(1, unit='H'))
-    dti.tz_localize('Europe/Warsaw', nonexistent='NaT')
+    dti.tz_localize("Europe/Warsaw", nonexistent="shift_forward")
+    dti.tz_localize("Europe/Warsaw", nonexistent="shift_backward")
+    dti.tz_localize("Europe/Warsaw", nonexistent=pd.Timedelta(1, unit="H"))
+    dti.tz_localize("Europe/Warsaw", nonexistent="NaT")

 .. _timeseries.timezone_series:

@@ -2502,7 +2502,7 @@ represented with a dtype of ``datetime64[ns]``.

 .. ipython:: python

-    s_naive = pd.Series(pd.date_range('20130101', periods=3))
+    s_naive = pd.Series(pd.date_range("20130101", periods=3))
     s_naive

-A :class:`Series` with a time zone **aware** values is
+A :class:`Series` with time zone **aware** values is
@@ -2510,7 +2510,7 @@ represented with a dtype of ``datetime64[ns, tz]`` where ``tz`` is the time zone

 .. ipython:: python

-    s_aware = pd.Series(pd.date_range('20130101', periods=3, tz='US/Eastern'))
+    s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))
     s_aware

-Both of these :class:`Series` time zone information
+Both of these :class:`Series`' time zone information
@@ -2520,7 +2520,7 @@ For example, to localize and convert a naive stamp to time zone aware.

 .. ipython:: python

-    s_naive.dt.tz_localize('UTC').dt.tz_convert('US/Eastern')
+    s_naive.dt.tz_localize("UTC").dt.tz_convert("US/Eastern")

 Time zone information can also be manipulated using the ``astype`` method.
 This method can localize and convert time zone naive timestamps or
@@ -2529,13 +2529,13 @@ convert time zone aware timestamps.

 .. ipython:: python

     # localize and convert a naive time zone
-    s_naive.astype('datetime64[ns, US/Eastern]')
+    s_naive.astype("datetime64[ns, US/Eastern]")

     # make an aware tz naive
-    s_aware.astype('datetime64[ns]')
+    s_aware.astype("datetime64[ns]")

     # convert to a new time zone
-    s_aware.astype('datetime64[ns, CET]')
+    s_aware.astype("datetime64[ns, CET]")
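To see the dtype bookkeeping above in one place, a minimal sketch (the variable names mirror the surrounding examples; only ``pandas`` is assumed):

.. code-block:: python

    import pandas as pd

    s_naive = pd.Series(pd.date_range("20130101", periods=3))
    s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))

    # Naive values carry no zone; aware values embed the zone in the dtype.
    assert str(s_naive.dtype) == "datetime64[ns]"
    assert str(s_aware.dtype) == "datetime64[ns, US/Eastern]"

    # astype with a tz-aware dtype converts an aware series in one step.
    assert str(s_aware.astype("datetime64[ns, CET]").dtype) == "datetime64[ns, CET]"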
 .. note::

@@ -2561,4 +2561,4 @@ convert time zone aware timestamps.

 .. ipython:: python

-    s_aware.to_numpy(dtype='datetime64[ns]')
+    s_aware.to_numpy(dtype="datetime64[ns]")
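A minimal sketch of the conversion behavior shown above (assuming ``numpy`` is available: with no requested dtype, NumPy falls back to an object array of :class:`Timestamp` objects, which preserves the time zone, while requesting ``datetime64[ns]`` converts the values to naive UTC):

.. code-block:: python

    import numpy as np
    import pandas as pd

    s_aware = pd.Series(pd.date_range("20130101", periods=3, tz="US/Eastern"))

    # Object fallback: the zone survives inside each Timestamp element.
    assert np.asarray(s_aware).dtype == object

    # Explicit datetime64[ns]: values become naive UTC instants.
    assert s_aware.to_numpy(dtype="datetime64[ns]").dtype == np.dtype("datetime64[ns]")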