Skip to content

Commit 0802d1c

Browse files
committed
CLN: Py2/3-compatible dict keys/items/values.
+ many other fixups
1 parent a038653 commit 0802d1c

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

48 files changed

+142
-146
lines changed

doc/make.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -259,7 +259,7 @@ def _get_config():
259259
func = funcd.get(arg)
260260
if func is None:
261261
raise SystemExit('Do not know how to handle %s; valid args are %s' % (
262-
arg, funcd.keys()))
262+
arg, list(funcd.keys())))
263263
func()
264264
else:
265265
small_docs = False

pandas/core/array.py

Lines changed: 1 addition & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,6 @@
22
Isolate pandas's exposure to NumPy
33
"""
44

5-
from pandas.util import compat
65
import numpy as np
76
import six
87

@@ -18,7 +17,7 @@
1817

1918
_lift_types = []
2019

21-
for _k, _v in compat.iteritems(_dtypes):
20+
for _k, _v in _dtypes.items():
2221
for _i in _v:
2322
_lift_types.append(_k + str(_i))
2423

pandas/core/common.py

Lines changed: 8 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -2,10 +2,10 @@
22
Misc tools for implementing data structures
33
"""
44

5-
from pandas.util.py3compat import range, long
6-
import itertools
75
import re
86
from datetime import datetime
7+
import codecs
8+
import csv
99

1010
from numpy.lib.format import read_array, write_array
1111
import numpy as np
@@ -15,15 +15,13 @@
1515
import pandas.tslib as tslib
1616

1717
from pandas.util import py3compat
18-
import codecs
19-
import csv
18+
from pandas.util.py3compat import StringIO, BytesIO, range, long
19+
from six.moves import zip, map
20+
import six
2021

21-
from pandas.util.py3compat import StringIO, BytesIO
2222

2323
from pandas.core.config import get_option
2424
from pandas.core import array as pa
25-
import six
26-
from six.moves import map
2725

2826
# XXX: HACK for NumPy 1.5.1 to suppress warnings
2927
try:
@@ -1366,7 +1364,7 @@ def iterpairs(seq):
13661364
seq_it_next = iter(seq)
13671365
next(seq_it_next)
13681366

1369-
return itertools.izip(seq_it, seq_it_next)
1367+
return zip(seq_it, seq_it_next)
13701368

13711369

13721370
def split_ranges(mask):
@@ -1992,7 +1990,7 @@ def _pprint_dict(seq, _nest_lvl=0,**kwds):
19921990

19931991
nitems = get_option("max_seq_items") or len(seq)
19941992

1995-
for k, v in seq.items()[:nitems]:
1993+
for k, v in list(seq.items())[:nitems]:
19961994
pairs.append(pfmt % (pprint_thing(k,_nest_lvl+1,**kwds),
19971995
pprint_thing(v,_nest_lvl+1,**kwds)))
19981996

@@ -2048,7 +2046,7 @@ def as_escaped_unicode(thing,escape_chars=escape_chars):
20482046
translate.update(escape_chars)
20492047
else:
20502048
translate = escape_chars
2051-
escape_chars = escape_chars.keys()
2049+
escape_chars = list(escape_chars.keys())
20522050
else:
20532051
escape_chars = escape_chars or tuple()
20542052
for c in escape_chars:

pandas/core/config.py

Lines changed: 5 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -128,8 +128,8 @@ def _set_option(*args, **kwargs):
128128

129129
# if 1 kwarg then it must be silent=True or silent=False
130130
if nkwargs:
131-
k, = kwargs.keys()
132-
v, = kwargs.values()
131+
k, = list(kwargs.keys())
132+
v, = list(kwargs.values())
133133

134134
if k != 'silent':
135135
raise ValueError("the only allowed keyword argument is 'silent', "
@@ -209,7 +209,7 @@ def __getattr__(self, key):
209209
return _get_option(prefix)
210210

211211
def __dir__(self):
212-
return self.d.keys()
212+
return list(self.d.keys())
213213

214214
# For user convenience, we'd like to have the available options described
215215
# in the docstring. For dev convenience we'd like to generate the docstrings
@@ -232,7 +232,7 @@ def __call__(self, *args, **kwds):
232232
@property
233233
def __doc__(self):
234234
opts_desc = _describe_option('all', _print_desc=False)
235-
opts_list = pp_options_list(_registered_options.keys())
235+
opts_list = pp_options_list(list(_registered_options.keys()))
236236
return self.__doc_tmpl__.format(opts_desc=opts_desc,
237237
opts_list=opts_list)
238238

@@ -351,7 +351,7 @@ def __init__(self, *args):
351351
errmsg = "Need to invoke as option_context(pat,val,[(pat,val),..))."
352352
raise AssertionError(errmsg)
353353

354-
ops = zip(args[::2], args[1::2])
354+
ops = list(zip(args[::2], args[1::2]))
355355
undo = []
356356
for pat, val in ops:
357357
undo.append((pat, _get_option(pat, silent=True)))

pandas/core/frame.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -496,7 +496,7 @@ def _init_dict(self, data, index, columns, dtype=None):
496496
data = dict((k, v) for k, v in compat.iteritems(data) if k in columns)
497497

498498
if index is None:
499-
index = extract_index(data.values())
499+
index = extract_index(list(data.values()))
500500
else:
501501
index = _ensure_index(index)
502502

@@ -521,9 +521,9 @@ def _init_dict(self, data, index, columns, dtype=None):
521521
data_names.append(k)
522522
arrays.append(v)
523523
else:
524-
keys = data.keys()
524+
keys = list(data.keys())
525525
if not isinstance(data, OrderedDict):
526-
keys = _try_sort(data.keys())
526+
keys = _try_sort(list(data.keys()))
527527
columns = data_names = Index(keys)
528528
arrays = [data[k] for k in columns]
529529

@@ -954,10 +954,10 @@ def from_dict(cls, data, orient='columns', dtype=None):
954954
if orient == 'index':
955955
if len(data) > 0:
956956
# TODO speed up Series case
957-
if isinstance(data.values()[0], (Series, dict)):
957+
if isinstance(list(data.values())[0], (Series, dict)):
958958
data = _from_nested_dict(data)
959959
else:
960-
data, index = data.values(), data.keys()
960+
data, index = list(data.values()), list(data.keys())
961961
elif orient != 'columns': # pragma: no cover
962962
raise ValueError('only recognize index or columns for orient')
963963

@@ -3600,7 +3600,7 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
36003600
to_replace = regex
36013601
regex = True
36023602

3603-
items = to_replace.items()
3603+
items = list(to_replace.items())
36043604
keys, values = zip(*items)
36053605

36063606
are_mappings = [isinstance(v, (dict, Series)) for v in values]
@@ -3615,8 +3615,8 @@ def replace(self, to_replace=None, value=None, inplace=False, limit=None,
36153615
value_dict = {}
36163616

36173617
for k, v in items:
3618-
to_rep_dict[k] = v.keys()
3619-
value_dict[k] = v.values()
3618+
to_rep_dict[k] = list(v.keys())
3619+
value_dict[k] = list(v.values())
36203620

36213621
to_replace, value = to_rep_dict, value_dict
36223622
else:
@@ -5735,7 +5735,7 @@ def extract_index(data):
57355735
indexes.append(v.index)
57365736
elif isinstance(v, dict):
57375737
have_dicts = True
5738-
indexes.append(v.keys())
5738+
indexes.append(list(v.keys()))
57395739
elif isinstance(v, (list, tuple, np.ndarray)):
57405740
have_raw_arrays = True
57415741
raw_lengths.append(len(v))
@@ -5895,7 +5895,7 @@ def _list_of_series_to_arrays(data, columns, coerce_float=False, dtype=None):
58955895

58965896
def _list_of_dict_to_arrays(data, columns, coerce_float=False, dtype=None):
58975897
if columns is None:
5898-
gen = (x.keys() for x in data)
5898+
gen = (list(x.keys()) for x in data)
58995899
columns = lib.fast_unique_multiple_list_gen(gen)
59005900

59015901
# assure that they are of the base dict class and not of derived

pandas/core/groupby.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1438,8 +1438,8 @@ def aggregate(self, func_or_funcs, *args, **kwargs):
14381438

14391439
def _aggregate_multiple_funcs(self, arg):
14401440
if isinstance(arg, dict):
1441-
columns = arg.keys()
1442-
arg = arg.items()
1441+
columns = list(arg.keys())
1442+
arg = list(arg.items())
14431443
elif any(isinstance(x, (tuple, list)) for x in arg):
14441444
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
14451445
for x in arg]
@@ -1731,7 +1731,7 @@ def aggregate(self, arg, *args, **kwargs):
17311731
result[col] = colg.aggregate(agg_how)
17321732
keys.append(col)
17331733

1734-
if isinstance(result.values()[0], DataFrame):
1734+
if isinstance(list(result.values())[0], DataFrame):
17351735
from pandas.tools.merge import concat
17361736
result = concat([result[k] for k in keys], keys=keys, axis=1)
17371737
else:

pandas/core/index.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2704,7 +2704,7 @@ def _get_combined_index(indexes, intersect=False):
27042704

27052705

27062706
def _get_distinct_indexes(indexes):
2707-
return dict((id(x), x) for x in indexes).values()
2707+
return list(dict((id(x), x) for x in indexes).values())
27082708

27092709

27102710
def _union_indexes(indexes):

pandas/core/panel.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -284,7 +284,7 @@ def _init_dict(self, data, axes, dtype=None):
284284
data = OrderedDict((k, v) for k, v
285285
in compat.iteritems(data) if k in haxis)
286286
else:
287-
ks = data.keys()
287+
ks = list(data.keys())
288288
if not isinstance(data,OrderedDict):
289289
ks = _try_sort(ks)
290290
haxis = Index(ks)
@@ -360,7 +360,7 @@ def from_dict(cls, data, intersect=False, orient='items', dtype=None):
360360
raise ValueError('Orientation must be one of {items, minor}.')
361361

362362
d = cls._homogenize_dict(cls, data, intersect=intersect, dtype=dtype)
363-
ks = d['data'].keys()
363+
ks = list(d['data'].keys())
364364
if not isinstance(d['data'],OrderedDict):
365365
ks = list(sorted(ks))
366366
d[cls._info_axis] = Index(ks)

pandas/core/reshape.py

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -543,9 +543,9 @@ def _stack_multi_columns(frame, level=-1, dropna=True):
543543

544544
# tuple list excluding level for grouping columns
545545
if len(frame.columns.levels) > 2:
546-
tuples = zip(*[lev.values.take(lab)
546+
tuples = list(zip(*[lev.values.take(lab)
547547
for lev, lab in zip(this.columns.levels[:-1],
548-
this.columns.labels[:-1])])
548+
this.columns.labels[:-1])]))
549549
unique_groups = [key for key, _ in itertools.groupby(tuples)]
550550
new_names = this.columns.names[:-1]
551551
new_columns = MultiIndex.from_tuples(unique_groups, names=new_names)
@@ -747,8 +747,8 @@ def lreshape(data, groups, dropna=True, label=None):
747747
reshaped : DataFrame
748748
"""
749749
if isinstance(groups, dict):
750-
keys = groups.keys()
751-
values = groups.values()
750+
keys = list(groups.keys())
751+
values = list(groups.values())
752752
else:
753753
keys, values = zip(*groups)
754754

pandas/core/strings.py

Lines changed: 5 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
import numpy as np
22

33
from six.moves import zip
4+
import six
45
from pandas.core.common import isnull
56
from pandas.core.series import Series
67
import re
@@ -282,16 +283,18 @@ def str_repeat(arr, repeats):
282283
if np.isscalar(repeats):
283284
def rep(x):
284285
try:
285-
return str.__mul__(x, repeats)
286+
return six.binary_type.__mul__(x, repeats)
286287
except TypeError:
287288
return six.text_type.__mul__(x, repeats)
289+
288290
return _na_map(rep, arr)
289291
else:
290292
def rep(x, r):
291293
try:
292-
return str.__mul__(x, r)
294+
return six.binary_type.__mul__(x, r)
293295
except TypeError:
294296
return six.text_type.__mul__(x, r)
297+
295298
repeats = np.asarray(repeats, dtype=object)
296299
result = lib.vec_binop(arr, repeats, rep)
297300
return result

pandas/io/common.py

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -10,21 +10,23 @@
1010

1111
if py3compat.PY3:
1212
from urllib.request import urlopen
13+
_urlopen = urlopen
1314
from urllib.parse import urlparse as parse_url
1415
import urllib.parse as compat_parse
15-
from urllib.parse import uses_relative, uses_netloc, uses_params
16+
from urllib.parse import uses_relative, uses_netloc, uses_params, urlencode
1617
from urllib.error import URLError
1718
from http.client import HTTPException
1819
else:
1920
from urllib2 import urlopen as _urlopen
21+
from urllib import urlencode
2022
from urlparse import urlparse as parse_url
2123
from urlparse import uses_relative, uses_netloc, uses_params
2224
from urllib2 import URLError
2325
from httplib import HTTPException
2426
from contextlib import contextmanager, closing
2527
from functools import wraps
2628

27-
@wraps(_urlopen)
29+
# @wraps(_urlopen)
2830
@contextmanager
2931
def urlopen(*args, **kwargs):
3032
with closing(_urlopen(*args, **kwargs)) as f:
@@ -80,8 +82,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
8082
"""
8183

8284
if _is_url(filepath_or_buffer):
83-
from urllib2 import urlopen
84-
filepath_or_buffer = urlopen(filepath_or_buffer)
85+
req = _urlopen(filepath_or_buffer)
8586
if py3compat.PY3: # pragma: no cover
8687
if encoding:
8788
errors = 'strict'
@@ -101,7 +102,7 @@ def get_filepath_or_buffer(filepath_or_buffer, encoding=None):
101102
raise ImportError("boto is required to handle s3 files")
102103
# Assuming AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
103104
# are environment variables
104-
parsed_url = urlparse.urlparse(filepath_or_buffer)
105+
parsed_url = parse_url(filepath_or_buffer)
105106
conn = boto.connect_s3()
106107
b = conn.get_bucket(parsed_url.netloc)
107108
k = boto.s3.key.Key(b)

pandas/io/data.py

Lines changed: 3 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -6,7 +6,6 @@
66
import warnings
77
import tempfile
88
import datetime as dt
9-
import urllib
109
import time
1110

1211
from collections import defaultdict
@@ -17,7 +16,7 @@
1716
from pandas import Panel, DataFrame, Series, read_csv, concat
1817
from pandas.core.common import PandasError
1918
from pandas.io.parsers import TextParser
20-
from pandas.io.common import urlopen, ZipFile
19+
from pandas.io.common import urlopen, ZipFile, urlencode
2120
from pandas.util.testing import _network_error_classes
2221
import six
2322
from six.moves import map, zip
@@ -115,7 +114,7 @@ def get_quote_yahoo(symbols):
115114

116115
# for codes see: http://www.gummy-stuff.org/Yahoo-data.htm
117116
request = ''.join(six.itervalues(_yahoo_codes)) # code request string
118-
header = _yahoo_codes.keys()
117+
header = list(_yahoo_codes.keys())
119118

120119
data = defaultdict(list)
121120

@@ -202,7 +201,7 @@ def _get_hist_google(sym, start, end, retry_count, pause):
202201
google_URL = 'http://www.google.com/finance/historical?'
203202

204203
# www.google.com/finance/historical?q=GOOG&startdate=Jun+9%2C+2011&enddate=Jun+8%2C+2013&output=csv
205-
url = google_URL + urllib.urlencode({"q": sym,
204+
url = google_URL + urlencode({"q": sym,
206205
"startdate": start.strftime('%b %d, '
207206
'%Y'),
208207
"enddate": end.strftime('%b %d, %Y'),

pandas/io/excel.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -5,14 +5,14 @@
55
#----------------------------------------------------------------------
66
# ExcelFile class
77

8-
from pandas.util.py3compat import range
98
import datetime
109
import numpy as np
1110

1211
from pandas.io.parsers import TextParser
1312
from pandas.tseries.period import Period
1413
from pandas import json
1514
from six.moves import map, zip, reduce
15+
from pandas.util.py3compat import range
1616
import six
1717

1818
def read_excel(path_or_buf, sheetname, kind=None, **kwds):

0 commit comments

Comments
 (0)