This repository was archived by the owner on Nov 22, 2022. It is now read-only.

Commit 897330a

Disallow missing, star and unused imports (#518)
- Resolves #516
- Resolves #517
1 parent 56aab84 commit 897330a
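
The three rules in the title map onto standard checker behavior, and the diffs below apply the corresponding fixes file by file: missing imports are handled with per-module `ignore_missing_imports` overrides in mypy.ini, star imports are replaced with explicit import lists, and unused imports are either deleted or marked as intentional re-exports with `# noqa: F401`. The commit does not show the flake8/mypy invocation itself; as an illustrative sketch (not from the diff), flake8's F401 check is what flags a dead import:

from typing import Any, List  # F401: 'typing.Any' imported but unused

def first(xs: List[int]) -> int: ...

Deleting `Any` from the import list is exactly the shape of most of the small hunks below.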


77 files changed: +395, -349 lines

mypy.ini

Lines changed: 18 additions & 0 deletions
@@ -1 +1,19 @@
 [mypy]
+
+[mypy-pyspark.cloudpickle.*]
+ignore_errors = True
+
+[mypy-py4j.*]
+ignore_missing_imports = True
+
+[mypy-numpy.*]
+ignore_missing_imports = True
+
+[mypy-scipy.*]
+ignore_missing_imports = True
+
+[mypy-pandas.*]
+ignore_missing_imports = True
+
+[mypy-pyarrow]
+ignore_missing_imports = True
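
Each new section scopes its override to a module pattern: `ignore_errors` silences every error inside `pyspark.cloudpickle` (whose stub, third_party/3/pyspark/cloudpickle.pyi, is deleted below), while `ignore_missing_imports` suppresses only mypy's missing-stub error, and only for the named untyped dependencies rather than globally. Note that `[mypy-pyarrow]` matches just the top-level module, unlike the `.*` patterns above it. As an illustration (not part of the commit), this is the class of error being suppressed:

# Without the [mypy-py4j.*] override, mypy would report something like:
#   error: Cannot find implementation or library stub for module
#   named "py4j.java_gateway"
from py4j.java_gateway import JavaObject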

third_party/3/pyspark/__init__.pyi

Lines changed: 29 additions & 21 deletions
@@ -16,36 +16,49 @@
 # specific language governing permissions and limitations
 # under the License.

-# Stubs for pyspark (Python 3)
-#
-
 from typing import Callable, Optional, TypeVar

-from pyspark.status import *
-from pyspark.accumulators import (
+from pyspark.accumulators import (  # noqa: F401
     Accumulator as Accumulator,
     AccumulatorParam as AccumulatorParam,
 )
-from pyspark.broadcast import Broadcast as Broadcast
-from pyspark.conf import SparkConf as SparkConf
-from pyspark.context import SparkContext as SparkContext
-from pyspark.files import SparkFiles as SparkFiles
-from pyspark.profiler import BasicProfiler as BasicProfiler, Profiler as Profiler
-from pyspark.rdd import RDD as RDD, RDDBarrier as RDDBarrier
-from pyspark.serializers import (
+from pyspark.broadcast import Broadcast as Broadcast  # noqa: F401
+from pyspark.conf import SparkConf as SparkConf  # noqa: F401
+from pyspark.context import SparkContext as SparkContext  # noqa: F401
+from pyspark.files import SparkFiles as SparkFiles  # noqa: F401
+from pyspark.status import (
+    StatusTracker as StatusTracker,
+    SparkJobInfo as SparkJobInfo,
+    SparkStageInfo as SparkStageInfo,
+)  # noqa: F401
+from pyspark.profiler import (  # noqa: F401
+    BasicProfiler as BasicProfiler,
+    Profiler as Profiler,
+)
+from pyspark.rdd import RDD as RDD, RDDBarrier as RDDBarrier  # noqa: F401
+from pyspark.serializers import (  # noqa: F401
     MarshalSerializer as MarshalSerializer,
     PickleSerializer as PickleSerializer,
 )
-from pyspark.storagelevel import StorageLevel as StorageLevel
-from pyspark.taskcontext import (
+from pyspark.status import (  # noqa: F401
+    SparkJobInfo as SparkJobInfo,
+    SparkStageInfo as SparkStageInfo,
+    StatusTracker as StatusTracker,
+)
+from pyspark.storagelevel import StorageLevel as StorageLevel  # noqa: F401
+from pyspark.taskcontext import (  # noqa: F401
     BarrierTaskContext as BarrierTaskContext,
     BarrierTaskInfo as BarrierTaskInfo,
     TaskContext as TaskContext,
 )
-from pyspark.util import InheritableThread as InheritableThread
+from pyspark.util import InheritableThread as InheritableThread  # noqa: F401

 # Compatiblity imports
-from pyspark.sql import SQLContext as SQLContext, HiveContext as HiveContext, Row as Row
+from pyspark.sql import (  # noqa: F401
+    SQLContext as SQLContext,
+    HiveContext as HiveContext,
+    Row as Row,
+)

 T = TypeVar("T")
 F = TypeVar("F", bound=Callable)

@@ -58,8 +71,3 @@ def copy_func(
     doc: Optional[str] = ...,
 ) -> F: ...
 def keyword_only(func: F) -> F: ...
-
-# Names in __all__ with no definition:
-# SparkJobInfo
-# SparkStageInfo
-# StatusTracker
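
The pattern here recurs throughout the commit: `from pyspark.status import *` becomes explicit `Name as Name` imports. In a .pyi stub, mypy only treats a name as re-exported when it is imported in the redundant `X as X` form or listed in `__all__`, which is also why the old stubgen trailer ("Names in __all__ with no definition") could be deleted: the three status names are now properly defined. The `# noqa: F401` tells flake8 that the apparently unused import is a deliberate re-export. A minimal sketch of the idiom, with an illustrative module layout not from this repo:

# pkg/__init__.pyi  (illustrative)
from pkg.conf import Config as Config  # noqa: F401
# 'Config as Config' marks an explicit re-export for type checkers;
# 'noqa: F401' stops flake8 from flagging the import as unused.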

third_party/3/pyspark/_typing.pyi

Lines changed: 1 addition & 1 deletion
@@ -16,7 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.

-from typing import Any, Generic, Iterable, List, Optional, Sized, TypeVar, Union
+from typing import Iterable, Sized, TypeVar, Union
 from typing_extensions import Protocol

 T = TypeVar("T", covariant=True)

third_party/3/pyspark/accumulators.pyi

Lines changed: 0 additions & 1 deletion
@@ -29,7 +29,6 @@ T = TypeVar("T")
 U = TypeVar("U", bound=SupportsIAdd)

 import socketserver as SocketServer
-from typing import Any

 class Accumulator(Generic[T]):
     aid: int

third_party/3/pyspark/cloudpickle.pyi

Lines changed: 0 additions & 86 deletions
This file was deleted.

third_party/3/pyspark/conf.pyi

Lines changed: 1 addition & 1 deletion
@@ -20,7 +20,7 @@
 #

 from typing import overload
-from typing import Any, List, Optional, Tuple
+from typing import List, Optional, Tuple

 from py4j.java_gateway import JVMView, JavaObject  # type: ignore[import]
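
The py4j import here keeps its inline `# type: ignore[import]` even though mypy.ini now sets `ignore_missing_imports = True` for `py4j.*`; the two mechanisms overlap, with the comment scoped to one line and the config section to every py4j import. A sketch of the alternatives (config excerpts shown as comments):

# 1) Per-line, as used in this stub:
from py4j.java_gateway import JVMView  # type: ignore[import]
# 2) Per-module, as added to mypy.ini by this commit:
#      [mypy-py4j.*]
#      ignore_missing_imports = True
# 3) A global [mypy] ignore_missing_imports = True would also work,
#    but would defeat the point of disallowing missing imports
#    everywhere else.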

third_party/3/pyspark/context.pyi

Lines changed: 1 addition & 1 deletion
@@ -26,7 +26,7 @@ from py4j.java_gateway import JavaGateway, JavaObject  # type: ignore[import]
 from pyspark.accumulators import Accumulator, AccumulatorParam
 from pyspark.broadcast import Broadcast
 from pyspark.conf import SparkConf
-from pyspark.profiler import Profiler
+from pyspark.profiler import Profiler  # noqa: F401
 from pyspark.resource.information import ResourceInformation
 from pyspark.rdd import RDD
 from pyspark.serializers import Serializer

third_party/3/pyspark/daemon.pyi

Lines changed: 6 additions & 1 deletion
@@ -16,7 +16,12 @@
 # specific language governing permissions and limitations
 # under the License.

-from pyspark.serializers import UTF8Deserializer as UTF8Deserializer, read_int as read_int, write_int as write_int, write_with_length as write_with_length  # type: ignore[attr-defined]
+from pyspark.serializers import (  # noqa: F401
+    UTF8Deserializer as UTF8Deserializer,
+    read_int as read_int,
+    write_int as write_int,
+    write_with_length as write_with_length,
+)
 from typing import Any

 def compute_real_exit_code(exit_code: Any): ...

third_party/3/pyspark/join.pyi

Lines changed: 1 addition & 4 deletions
@@ -16,10 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.

-# Stubs for pyspark.join (Python 3)
-#
-
-from typing import Hashable, Iterable, List, Optional, Tuple, TypeVar
+from typing import Hashable, Iterable, Optional, Tuple, TypeVar

 from pyspark.resultiterable import ResultIterable
 import pyspark.rdd

third_party/3/pyspark/ml/__init__.pyi

Lines changed: 6 additions & 3 deletions
@@ -16,7 +16,7 @@
 # specific language governing permissions and limitations
 # under the License.

-from pyspark.ml import (
+from pyspark.ml import (  # noqa: F401
     classification as classification,
     clustering as clustering,
     evaluation as evaluation,

@@ -31,12 +31,15 @@ from pyspark.ml import (
     tuning as tuning,
     util as util,
 )
-from pyspark.ml.base import (
+from pyspark.ml.base import (  # noqa: F401
     Estimator as Estimator,
     Model as Model,
     PredictionModel as PredictionModel,
     Predictor as Predictor,
     Transformer as Transformer,
     UnaryTransformer as UnaryTransformer,
 )
-from pyspark.ml.pipeline import Pipeline as Pipeline, PipelineModel as PipelineModel
+from pyspark.ml.pipeline import (  # noqa: F401
+    Pipeline as Pipeline,
+    PipelineModel as PipelineModel,
+)

third_party/3/pyspark/ml/base.pyi

Lines changed: 14 additions & 10 deletions
@@ -18,35 +18,39 @@

 from typing import overload
 from typing import (
-    Any,
     Callable,
-    Dict,
     Generic,
     Iterable,
     List,
     Optional,
     Tuple,
     Type,
-    TypeVar,
 )
 from pyspark.ml._typing import M, P, T, ParamMap

 import _thread

 import abc
 from abc import abstractmethod
-from pyspark.ml.common import inherit_doc as inherit_doc
-from pyspark.ml.param import Params, Param
-from pyspark.ml.param.shared import *
-from pyspark.sql.column import Column
-from pyspark.sql.dataframe import DataFrame
-from pyspark.sql.functions import udf as udf
-from pyspark.sql.types import (
+from pyspark import since as since  # noqa: F401
+from pyspark.ml.common import inherit_doc as inherit_doc  # noqa: F401
+from pyspark.ml.param.shared import (
+    HasFeaturesCol as HasFeaturesCol,
+    HasInputCol as HasInputCol,
+    HasLabelCol as HasLabelCol,
+    HasOutputCol as HasOutputCol,
+    HasPredictionCol as HasPredictionCol,
+    Params as Params,
+)
+from pyspark.sql.functions import udf as udf  # noqa: F401
+from pyspark.sql.types import (  # noqa: F401
     DataType,
     StructField as StructField,
     StructType as StructType,
 )

+from pyspark.sql.dataframe import DataFrame
+
 class _FitMultipleIterator:
     fitSingleModel: Callable[[int], Transformer]
     numModel: int
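
This hunk also shows the star-import fix in full: `from pyspark.ml.param.shared import *` is replaced by the exact list of mixins the stub uses. flake8 reports F403 for a star import and F405 for each name that may come from one, and explicit imports let mypy confirm that every name actually exists in pyspark.ml.param.shared. Schematically (the "before" line reproduces the removed pattern; the names are real):

# Before: F403, and each use of HasFeaturesCol etc. is F405
# ("may be undefined, or defined from star imports")
from pyspark.ml.param.shared import *

# After: explicit and checkable
from pyspark.ml.param.shared import (
    HasFeaturesCol as HasFeaturesCol,
    Params as Params,
)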

third_party/3/pyspark/ml/classification.pyi

Lines changed: 30 additions & 18 deletions
@@ -16,36 +16,48 @@
 # specific language governing permissions and limitations
 # under the License.

-from typing import Any, Dict, List, Optional, Type, TypeVar
+from typing import Any, List, Optional, Type
 from pyspark.ml._typing import JM, M, P, T, ParamMap

 import abc
 from abc import abstractmethod
-from pyspark.ml.base import Estimator, Model, Transformer, PredictionModel, Predictor
+from pyspark.ml import Estimator, Model, PredictionModel, Predictor, Transformer
 from pyspark.ml.base import _PredictorParams
-from pyspark.ml.linalg import Matrix, Vector
-from pyspark.ml.param.shared import *
+from pyspark.ml.param.shared import (
+    HasAggregationDepth,
+    HasBlockSize,
+    HasElasticNetParam,
+    HasFitIntercept,
+    HasMaxIter,
+    HasParallelism,
+    HasProbabilityCol,
+    HasRawPredictionCol,
+    HasRegParam,
+    HasSeed,
+    HasSolver,
+    HasStandardization,
+    HasStepSize,
+    HasThreshold,
+    HasThresholds,
+    HasTol,
+    HasWeightCol,
+)
+from pyspark.ml.regression import _FactorizationMachinesParams
 from pyspark.ml.tree import (
     _DecisionTreeModel,
     _DecisionTreeParams,
-    _TreeEnsembleModel,
-    _RandomForestParams,
     _GBTParams,
     _HasVarianceImpurity,
+    _RandomForestParams,
     _TreeClassifierParams,
-    _TreeEnsembleParams,
-)
-from pyspark.ml.regression import (
-    _FactorizationMachinesParams,
-    DecisionTreeRegressionModel,
-)
-from pyspark.ml.util import *
-from pyspark.ml.wrapper import (
-    JavaPredictionModel,
-    JavaPredictor,
-    JavaWrapper,
-    JavaTransformer,
+    _TreeEnsembleModel,
 )
+from pyspark.ml.util import HasTrainingSummary, JavaMLReadable, JavaMLWritable
+from pyspark.ml.wrapper import JavaPredictionModel, JavaPredictor, JavaWrapper
+
+from pyspark.ml.linalg import Matrix, Vector
+from pyspark.ml.param import Param
+from pyspark.ml.regression import DecisionTreeRegressionModel
 from pyspark.sql.dataframe import DataFrame

 class _ClassifierParams(HasRawPredictionCol, _PredictorParams): ...
