diff --git a/mypy/checkexpr.py b/mypy/checkexpr.py
index 8df1a407ad0f..88968f9735bb 100644
--- a/mypy/checkexpr.py
+++ b/mypy/checkexpr.py
@@ -1784,11 +1784,17 @@ def visit_str_expr(self, e: StrExpr) -> Type:
 
     def visit_bytes_expr(self, e: BytesExpr) -> Type:
         """Type check a bytes literal (trivial)."""
-        return self.named_type('builtins.bytes')
+        typ = self.named_type('builtins.bytes')
+        if is_literal_type_like(self.type_context[-1]):
+            return LiteralType(value=e.value, fallback=typ)
+        return typ
 
     def visit_unicode_expr(self, e: UnicodeExpr) -> Type:
         """Type check a unicode literal (trivial)."""
-        return self.named_type('builtins.unicode')
+        typ = self.named_type('builtins.unicode')
+        if is_literal_type_like(self.type_context[-1]):
+            return LiteralType(value=e.value, fallback=typ)
+        return typ
 
     def visit_float_expr(self, e: FloatExpr) -> Type:
         """Type check a float literal (trivial)."""
diff --git a/mypy/exprtotype.py b/mypy/exprtotype.py
index f7e92f0bce7d..6528ed562508 100644
--- a/mypy/exprtotype.py
+++ b/mypy/exprtotype.py
@@ -5,7 +5,7 @@
     ListExpr, StrExpr, BytesExpr, UnicodeExpr, EllipsisExpr, CallExpr,
     get_member_expr_fullname
 )
-from mypy.fastparse import parse_type_comment, parse_type_string
+from mypy.fastparse import parse_type_string
 from mypy.types import (
     Type, UnboundType, TypeList, EllipsisType, AnyType, Optional, CallableArgument, TypeOfAny,
     RawLiteralType,
@@ -111,8 +111,15 @@ def expr_to_unanalyzed_type(expr: Expression, _parent: Optional[Expression] = No
     elif isinstance(expr, ListExpr):
         return TypeList([expr_to_unanalyzed_type(t, expr) for t in expr.items],
                         line=expr.line, column=expr.column)
-    elif isinstance(expr, (StrExpr, BytesExpr, UnicodeExpr)):
-        return parse_type_string(expr.value, expr.line, expr.column)
+    elif isinstance(expr, StrExpr):
+        return parse_type_string(expr.value, 'builtins.str', expr.line, expr.column,
+                                 assume_str_is_unicode=expr.from_python_3)
+    elif isinstance(expr, BytesExpr):
+        return parse_type_string(expr.value, 'builtins.bytes', expr.line, expr.column,
+                                 assume_str_is_unicode=False)
+    elif isinstance(expr, UnicodeExpr):
+        return parse_type_string(expr.value, 'builtins.unicode', expr.line, expr.column,
+                                 assume_str_is_unicode=True)
     elif isinstance(expr, UnaryExpr):
         typ = expr_to_unanalyzed_type(expr.expr)
         if isinstance(typ, RawLiteralType) and isinstance(typ.value, int) and expr.op == '-':
diff --git a/mypy/fastparse.py b/mypy/fastparse.py
index 1ce5388917a0..7c10e14fc1ac 100644
--- a/mypy/fastparse.py
+++ b/mypy/fastparse.py
@@ -51,6 +51,7 @@
         NameConstant,
         Expression as ast3_Expression,
         Str,
+        Bytes,
         Index,
         Num,
         UnaryOp,
@@ -140,7 +141,11 @@ def parse(source: Union[str, bytes],
     return tree
 
 
-def parse_type_comment(type_comment: str, line: int, errors: Optional[Errors]) -> Optional[Type]:
+def parse_type_comment(type_comment: str,
+                       line: int,
+                       errors: Optional[Errors],
+                       assume_str_is_unicode: bool = True,
+                       ) -> Optional[Type]:
     try:
         typ = ast3.parse(type_comment, '<type_comment>', 'eval')
     except SyntaxError as e:
@@ -151,24 +156,39 @@ def parse_type_comment(type_comment: str, line: int, errors: Optional[Errors]) -
             raise
     else:
         assert isinstance(typ, ast3_Expression)
-        return TypeConverter(errors, line=line).visit(typ.body)
+        return TypeConverter(errors, line=line,
+                             assume_str_is_unicode=assume_str_is_unicode).visit(typ.body)
 
 
-def parse_type_string(expr_string: str, line: int, column: int) -> Type:
-    """Parses a type that was originally present inside of an explicit string.
+def parse_type_string(expr_string: str, expr_fallback_name: str,
+                      line: int, column: int, assume_str_is_unicode: bool = True) -> Type:
+    """Parses a type that was originally present inside of an explicit string,
+    byte string, or unicode string.
 
     For example, suppose we have the type `Foo["blah"]`. We should parse the
     string expression "blah" using this function.
+
+    If `assume_str_is_unicode` is set to true, this function will assume that
+    `Foo["blah"]` is equivalent to `Foo[u"blah"]`. Otherwise, it assumes it's
+    equivalent to `Foo[b"blah"]`.
+
+    The caller is responsible for keeping track of the context in which the
+    type string was encountered (e.g. in Python 3 code, Python 2 code, Python 2
+    code with unicode_literals...) and setting `assume_str_is_unicode` accordingly.
     """
     try:
-        node = parse_type_comment(expr_string.strip(), line=line, errors=None)
+        node = parse_type_comment(expr_string.strip(), line=line, errors=None,
+                                  assume_str_is_unicode=assume_str_is_unicode)
         if isinstance(node, UnboundType) and node.original_str_expr is None:
             node.original_str_expr = expr_string
+            node.original_str_fallback = expr_fallback_name
             return node
         else:
-            return RawLiteralType(expr_string, 'builtins.str', line, column)
-    except SyntaxError:
-        return RawLiteralType(expr_string, 'builtins.str', line, column)
+            return RawLiteralType(expr_string, expr_fallback_name, line, column)
+    except (SyntaxError, ValueError):
+        # Note: the parser will raise a `ValueError` instead of a SyntaxError if
+        # the string happens to contain things like \x00.
+        return RawLiteralType(expr_string, expr_fallback_name, line, column)
 
 
 def is_no_type_check_decorator(expr: ast3.expr) -> bool:
@@ -966,10 +986,7 @@ def visit_FormattedValue(self, n: ast3.FormattedValue) -> Expression:
 
     # Bytes(bytes s)
     def visit_Bytes(self, n: ast3.Bytes) -> Union[BytesExpr, StrExpr]:
-        # The following line is a bit hacky, but is the best way to maintain
-        # compatibility with how mypy currently parses the contents of bytes literals.
-        contents = str(n.s)[2:-1]
-        e = BytesExpr(contents)
+        e = BytesExpr(bytes_to_human_readable_repr(n.s))
         return self.set_line(e, n)
 
     # NameConstant(singleton value)
@@ -1042,10 +1059,15 @@ def visit_Index(self, n: Index) -> Node:
 
 
 class TypeConverter:
-    def __init__(self, errors: Optional[Errors], line: int = -1) -> None:
+    def __init__(self,
+                 errors: Optional[Errors],
+                 line: int = -1,
+                 assume_str_is_unicode: bool = True,
+                 ) -> None:
         self.errors = errors
         self.line = line
         self.node_stack = []  # type: List[AST]
+        self.assume_str_is_unicode = assume_str_is_unicode
 
     @overload
     def visit(self, node: ast3.expr) -> Type: ...
@@ -1090,8 +1112,11 @@ def visit_raw_str(self, s: str) -> Type:
         # An escape hatch that allows the AST walker in fastparse2 to
         # directly hook into the Python 3.5 type converter in some cases
         # without needing to create an intermediary `Str` object.
-        return (parse_type_comment(s.strip(), self.line, self.errors) or
-                AnyType(TypeOfAny.from_error))
+        return (parse_type_comment(s.strip(),
+                                   self.line,
+                                   self.errors,
+                                   self.assume_str_is_unicode)
+                or AnyType(TypeOfAny.from_error))
 
     def visit_Call(self, e: Call) -> Type:
         # Parse the arg constructor
@@ -1190,7 +1215,22 @@ def visit_Num(self, n: Num) -> Type:
 
     # Str(string s)
     def visit_Str(self, n: Str) -> Type:
-        return parse_type_string(n.s, line=self.line, column=-1)
+        # Note: we transform these fallback types into the correct types in
+        # 'typeanal.py' -- specifically in the named_type_with_normalized_str method.
+        # If we're analyzing Python 3, that function will translate 'builtins.unicode'
+        # into 'builtins.str'. In contrast, if we're analyzing Python 2 code, we'll
+        # translate 'builtins.bytes' in the method below into 'builtins.str'.
+        if 'u' in n.kind or self.assume_str_is_unicode:
+            return parse_type_string(n.s, 'builtins.unicode', self.line, n.col_offset,
+                                     assume_str_is_unicode=self.assume_str_is_unicode)
+        else:
+            return parse_type_string(n.s, 'builtins.str', self.line, n.col_offset,
+                                     assume_str_is_unicode=self.assume_str_is_unicode)
+
+    # Bytes(bytes s)
+    def visit_Bytes(self, n: Bytes) -> Type:
+        contents = bytes_to_human_readable_repr(n.s)
+        return RawLiteralType(contents, 'builtins.bytes', self.line, column=n.col_offset)
 
     # Subscript(expr value, slice slice, expr_context ctx)
     def visit_Subscript(self, n: ast3.Subscript) -> Type:
@@ -1246,3 +1286,17 @@ def stringify_name(n: AST) -> Optional[str]:
         if sv is not None:
             return "{}.{}".format(sv, n.attr)
     return None  # Can't do it.
+
+
+def bytes_to_human_readable_repr(b: bytes) -> str:
+    """Converts bytes into some human-readable representation. Unprintable
+    bytes such as the nul byte are escaped. For example:
+
+        >>> b = bytes([102, 111, 111, 10, 0])
+        >>> s = bytes_to_human_readable_repr(b)
+        >>> print(s)
+        foo\n\x00
+        >>> print(repr(s))
+        'foo\\n\\x00'
+    """
+    return str(b)[2:-1]
diff --git a/mypy/fastparse2.py b/mypy/fastparse2.py
index e33469341140..876864796ebe 100644
--- a/mypy/fastparse2.py
+++ b/mypy/fastparse2.py
@@ -45,7 +45,7 @@
 )
 from mypy import messages
 from mypy.errors import Errors
-from mypy.fastparse import TypeConverter, parse_type_comment
+from mypy.fastparse import TypeConverter, parse_type_comment, bytes_to_human_readable_repr
 from mypy.options import Options
 
 try:
@@ -113,7 +113,6 @@ def parse(source: Union[str, bytes],
         assert options.python_version[0] < 3 and not is_stub_file
         ast = ast27.parse(source, fnam, 'exec')
         tree = ASTConverter(options=options,
-                            is_stub=is_stub_file,
                             errors=errors,
                             ).visit(ast)
         assert isinstance(tree, MypyFile)
@@ -141,15 +140,32 @@ def is_no_type_check_decorator(expr: ast27.expr) -> bool:
 class ASTConverter:
     def __init__(self,
                  options: Options,
-                 is_stub: bool,
                  errors: Errors) -> None:
         self.class_nesting = 0
         self.imports = []  # type: List[ImportBase]
 
         self.options = options
-        self.is_stub = is_stub
         self.errors = errors
 
+        # Indicates whether this file is being parsed with unicode_literals enabled.
+        # Note: typed_ast already naturally takes unicode_literals into account when
+        # parsing so we don't have to worry when analyzing strings within this class.
+        #
+        # The only place where we use this field is when we call fastparse's TypeConverter
+        # and any related methods. That class accepts a Python 3 AST instead of a Python 2
+        # AST: as a result, it doesn't special-case the `unicode_literals` import and won't know
+        # exactly whether to parse some string as bytes or unicode.
+        #
+        # This distinction is relevant mostly when handling Literal types -- Literal[u"foo"]
+        # is not the same type as Literal[b"foo"], and Literal["foo"] could mean either the
+        # former or the latter based on context.
+        #
+        # This field is set in the 'visit_ImportFrom' method: it's ok to delay computing it
+        # because any `from __future__ import blah` import must be located at the top of the
+        # file, with the exception of the docstring. This means we're guaranteed to correctly
+        # set this field before we encounter any type hints.
+        self.unicode_literals = False
+
         # Cache of visit_X methods keyed by type of visited object
         self.visitor_cache = {}  # type: Dict[type, Callable[[Optional[AST]], Any]]
 
@@ -306,7 +322,8 @@ def visit_Module(self, mod: ast27.Module) -> MypyFile:
     #              arg? kwarg, expr* defaults)
     def visit_FunctionDef(self, n: ast27.FunctionDef) -> Statement:
         lineno = n.lineno
-        converter = TypeConverter(self.errors, line=lineno)
+        converter = TypeConverter(self.errors, line=lineno,
+                                  assume_str_is_unicode=self.unicode_literals)
         args, decompose_stmts = self.transform_args(n.args, lineno)
 
         arg_kinds = [arg.kind for arg in args]
@@ -413,7 +430,8 @@ def transform_args(self,
                        line: int,
                        ) -> Tuple[List[Argument], List[Statement]]:
         type_comments = n.type_comments  # type: Sequence[Optional[str]]
-        converter = TypeConverter(self.errors, line=line)
+        converter = TypeConverter(self.errors, line=line,
+                                  assume_str_is_unicode=self.unicode_literals)
         decompose_stmts = []  # type: List[Statement]
 
         n_args = n.args
@@ -532,7 +550,8 @@ def visit_Delete(self, n: ast27.Delete) -> DelStmt:
     def visit_Assign(self, n: ast27.Assign) -> AssignmentStmt:
         typ = None
         if n.type_comment:
-            typ = parse_type_comment(n.type_comment, n.lineno, self.errors)
+            typ = parse_type_comment(n.type_comment, n.lineno, self.errors,
+                                     assume_str_is_unicode=self.unicode_literals)
 
         stmt = AssignmentStmt(self.translate_expr_list(n.targets),
                               self.visit(n.value),
@@ -549,7 +568,8 @@ def visit_AugAssign(self, n: ast27.AugAssign) -> OperatorAssignmentStmt:
     # For(expr target, expr iter, stmt* body, stmt* orelse, string? type_comment)
     def visit_For(self, n: ast27.For) -> ForStmt:
         if n.type_comment is not None:
-            target_type = parse_type_comment(n.type_comment, n.lineno, self.errors)
+            target_type = parse_type_comment(n.type_comment, n.lineno, self.errors,
+                                             assume_str_is_unicode=self.unicode_literals)
         else:
             target_type = None
         stmt = ForStmt(self.visit(n.target),
@@ -576,7 +596,8 @@ def visit_If(self, n: ast27.If) -> IfStmt:
     # With(withitem* items, stmt* body, string? type_comment)
     def visit_With(self, n: ast27.With) -> WithStmt:
         if n.type_comment is not None:
-            target_type = parse_type_comment(n.type_comment, n.lineno, self.errors)
+            target_type = parse_type_comment(n.type_comment, n.lineno, self.errors,
+                                             assume_str_is_unicode=self.unicode_literals)
         else:
             target_type = None
         stmt = WithStmt([self.visit(n.context_expr)],
@@ -680,9 +701,12 @@ def visit_ImportFrom(self, n: ast27.ImportFrom) -> ImportBase:
             mod = n.module if n.module is not None else ''
             i = ImportAll(mod, n.level)  # type: ImportBase
         else:
-            i = ImportFrom(self.translate_module_id(n.module) if n.module is not None else '',
-                           n.level,
-                           [(a.name, a.asname) for a in n.names])
+            module_id = self.translate_module_id(n.module) if n.module is not None else ''
+            i = ImportFrom(module_id, n.level, [(a.name, a.asname) for a in n.names])
+
+            # See comments in the constructor for more information about this field.
+            if module_id == '__future__' and any(a.name == 'unicode_literals' for a in n.names):
+                self.unicode_literals = True
         self.imports.append(i)
         return self.set_line(i, n)
 
@@ -900,18 +924,17 @@ def visit_Num(self, n: ast27.Num) -> Expression:
 
     # Str(string s)
     def visit_Str(self, n: ast27.Str) -> Expression:
-        # Hack: assume all string literals in Python 2 stubs are normal
-        # strs (i.e. not unicode).  All stubs are parsed with the Python 3
-        # parser, which causes unprefixed string literals to be interpreted
-        # as unicode instead of bytes.  This hack is generally okay,
-        # because mypy considers str literals to be compatible with
-        # unicode.
+        # Note: typed_ast.ast27 will handle unicode_literals for us. If
+        # n.s is of type 'bytes', we know unicode_literals was not enabled;
+        # otherwise we know it was.
+        #
+        # Note that the following code is NOT run when parsing Python 2.7 stubs:
+        # we always parse stub files (no matter what version) using the Python 3
+        # parser. This is also why string literals in Python 2.7 stubs are assumed
+        # to be unicode.
         if isinstance(n.s, bytes):
-            value = n.s
-            # The following line is a bit hacky, but is the best way to maintain
-            # compatibility with how mypy currently parses the contents of bytes literals.
-            contents = str(value)[2:-1]
-            e = StrExpr(contents)  # type: Union[StrExpr, UnicodeExpr]
+            contents = bytes_to_human_readable_repr(n.s)
+            e = StrExpr(contents, from_python_3=False)  # type: Union[StrExpr, UnicodeExpr]
             return self.set_line(e, n)
         else:
             e = UnicodeExpr(n.s)
diff --git a/mypy/literals.py b/mypy/literals.py
index 3240bad668f3..3fb3f10a5551 100644
--- a/mypy/literals.py
+++ b/mypy/literals.py
@@ -98,7 +98,7 @@ def visit_int_expr(self, e: IntExpr) -> Key:
         return ('Literal', e.value)
 
     def visit_str_expr(self, e: StrExpr) -> Key:
-        return ('Literal', e.value)
+        return ('Literal', e.value, e.from_python_3)
 
     def visit_bytes_expr(self, e: BytesExpr) -> Key:
         return ('Literal', e.value)
diff --git a/mypy/nodes.py b/mypy/nodes.py
index 0c4b8156dd92..e7e825b352ae 100644
--- a/mypy/nodes.py
+++ b/mypy/nodes.py
@@ -1232,9 +1232,24 @@ class StrExpr(Expression):
 
     value = ''
 
-    def __init__(self, value: str) -> None:
+    # Keeps track of whether this string originated from Python 2 source code vs
+    # Python 3 source code. We need to keep track of this information so we can
+    # correctly handle types that have "nested strings". For example, consider this
+    # type alias, where we have a forward reference to a literal type:
+    #
+    #     Alias = List["Literal['foo']"]
+    #
+    # When parsing this, we need to know whether the outer string and alias came from
+    # Python 2 code vs Python 3 code so we can determine whether the inner `Literal['foo']`
+    # is meant to be `Literal[u'foo']` or `Literal[b'foo']`.
+    #
+    # This field keeps track of that information.
+    from_python_3 = True
+
+    def __init__(self, value: str, from_python_3: bool = True) -> None:
         super().__init__()
         self.value = value
+        self.from_python_3 = from_python_3
 
     def accept(self, visitor: ExpressionVisitor[T]) -> T:
         return visitor.visit_str_expr(self)
@@ -1243,7 +1258,16 @@ def accept(self, visitor: ExpressionVisitor[T]) -> T:
 class BytesExpr(Expression):
     """Bytes literal"""
 
-    value = ''  # TODO use bytes
+    # Note: we deliberately do NOT use bytes here because it ends up
+    # unnecessarily complicating a lot of the result logic. For example,
+    # we'd have to worry about converting the bytes into a format we can
+    # easily serialize/deserialize to and from JSON, would have to worry
+    # about turning the bytes into a human-readable representation in
+    # error messages...
+    #
+    # It's more convenient to just store the human-readable representation
+    # from the very start.
+    value = ''
 
     def __init__(self, value: str) -> None:
         super().__init__()
@@ -1256,7 +1280,7 @@ def accept(self, visitor: ExpressionVisitor[T]) -> T:
 class UnicodeExpr(Expression):
     """Unicode literal (Python 2.x)"""
 
-    value = ''  # TODO use bytes
+    value = ''
 
     def __init__(self, value: str) -> None:
         super().__init__()
diff --git a/mypy/treetransform.py b/mypy/treetransform.py
index 981ea9bdf8a4..64345e59060b 100644
--- a/mypy/treetransform.py
+++ b/mypy/treetransform.py
@@ -304,7 +304,7 @@ def visit_int_expr(self, node: IntExpr) -> IntExpr:
         return IntExpr(node.value)
 
     def visit_str_expr(self, node: StrExpr) -> StrExpr:
-        return StrExpr(node.value)
+        return StrExpr(node.value, node.from_python_3)
 
     def visit_bytes_expr(self, node: BytesExpr) -> BytesExpr:
         return BytesExpr(node.value)
diff --git a/mypy/typeanal.py b/mypy/typeanal.py
index f86c4171b938..a37aa4c6c03b 100644
--- a/mypy/typeanal.py
+++ b/mypy/typeanal.py
@@ -632,9 +632,10 @@ def analyze_literal_type(self, t: UnboundType) -> Type:
     def analyze_literal_param(self, idx: int, arg: Type, ctx: Context) -> Optional[List[Type]]:
         # This UnboundType was originally defined as a string.
         if isinstance(arg, UnboundType) and arg.original_str_expr is not None:
+            assert arg.original_str_fallback is not None
             return [LiteralType(
                 value=arg.original_str_expr,
-                fallback=self.named_type('builtins.str'),
+                fallback=self.named_type_with_normalized_str(arg.original_str_fallback),
                 line=arg.line,
                 column=arg.column,
             )]
@@ -670,7 +671,8 @@ def analyze_literal_param(self, idx: int, arg: Type, ctx: Context) -> Optional[L
                     ctx)
                 return None
 
-            fallback = self.named_type(arg.base_type_name)
+            # Remap bytes and unicode into the appropriate type for the correct Python version
+            fallback = self.named_type_with_normalized_str(arg.base_type_name)
             assert isinstance(fallback, Instance)
             return [LiteralType(arg.value, fallback, line=arg.line, column=arg.column)]
         elif isinstance(arg, (NoneTyp, LiteralType)):
@@ -792,6 +794,17 @@ def anal_var_defs(self, var_defs: List[TypeVarDef]) -> List[TypeVarDef]:
                                 vd.line))
         return a
 
+    def named_type_with_normalized_str(self, fully_qualified_name: str) -> Instance:
+        """Does almost the same thing as `named_type`, except that we immediately
+        unalias `builtins.bytes` and `builtins.unicode` to `builtins.str` as appropriate.
+        """
+        python_version = self.options.python_version
+        if python_version[0] == 2 and fully_qualified_name == 'builtins.bytes':
+            fully_qualified_name = 'builtins.str'
+        if python_version[0] >= 3 and fully_qualified_name == 'builtins.unicode':
+            fully_qualified_name = 'builtins.str'
+        return self.named_type(fully_qualified_name)
+
     def named_type(self, fully_qualified_name: str,
                    args: Optional[List[Type]] = None,
                    line: int = -1,
diff --git a/mypy/types.py b/mypy/types.py
index 5a7ecae041a9..d518ac092037 100644
--- a/mypy/types.py
+++ b/mypy/types.py
@@ -30,24 +30,29 @@
 # The set of all valid expressions that can currently be contained
 # inside of a Literal[...].
 #
-# Literals can contain enum-values: we special-case those and
-# store the value as a string.
+# Literals can contain bytes and enum-values: we special-case both of these
+# and store the value as a string. We rely on the fallback type that's also
+# stored with the Literal to determine how a string is being used.
 #
 # TODO: confirm that we're happy with representing enums (and the
 # other types) in the manner described above.
 #
-# Note: this type also happens to correspond to types that can be
-# directly converted into JSON. The serialize/deserialize methods
-# of 'LiteralType' relies on this, as well as
-# 'server.astdiff.SnapshotTypeVisitor' and 'types.TypeStrVisitor'.
-# If we end up adding any non-JSON-serializable types to this list,
-# we should make sure to edit those methods to match.
+# Note: if we change the set of types included below, we must also
+# make sure to audit the following methods:
 #
-# Alternatively, we should consider getting rid of this alias and
-# moving any shared special serialization/deserialization code into
-# RawLiteralType or something instead.
+# 1. types.LiteralType's serialize and deserialize methods: these methods
+#    need to make sure they can convert the below types into JSON and back.
+#
+# 2. types.LiteralType's `value_repr` method: this method is ultimately used
+#    by TypeStrVisitor's visit_literal_type to generate a reasonable
+#    repr-able output.
+#
+# 3. server.astdiff.SnapshotTypeVisitor's visit_literal_type method: this
+#    method assumes that the following types support equality checks and
+#    hashability.
 LiteralValue = Union[int, str, bool, None]
 
+
 # If we only import type_visitor in the middle of the file, mypy
 # breaks, and if we do it at the top, it breaks at runtime because of
 # import cycle issues, so we do it at the top while typechecking and
@@ -241,7 +246,8 @@ def deserialize(cls, data: JsonDict) -> 'TypeVarDef':
 class UnboundType(Type):
     """Instance type that has not been bound during semantic analysis."""
 
-    __slots__ = ('name', 'args', 'optional', 'empty_tuple_index', 'original_str_expr')
+    __slots__ = ('name', 'args', 'optional', 'empty_tuple_index',
+                 'original_str_expr', 'original_str_fallback')
 
     def __init__(self,
                  name: Optional[str],
@@ -251,6 +257,7 @@ def __init__(self,
                  optional: bool = False,
                  empty_tuple_index: bool = False,
                  original_str_expr: Optional[str] = None,
+                 original_str_fallback: Optional[str] = None,
                  ) -> None:
         super().__init__(line, column)
         if not args:
@@ -262,8 +269,8 @@ def __init__(self,
         self.optional = optional
         # Special case for X[()]
         self.empty_tuple_index = empty_tuple_index
-        # If this UnboundType was originally defined as a str, keep track of
-        # the original contents of that string. This way, if this UnboundExpr
+        # If this UnboundType was originally defined as a str or bytes, keep track of
+        # the original contents of that string-like thing. This way, if this UnboundExpr
         # ever shows up inside of a LiteralType, we can determine whether that
         # Literal[...] is valid or not. E.g. Literal[foo] is most likely invalid
         # (unless 'foo' is an alias for another literal or something) and
@@ -272,7 +279,11 @@ def __init__(self,
         # We keep track of the entire string instead of just using a boolean flag
         # so we can distinguish between things like Literal["foo"] vs
         # Literal["    foo   "].
+        #
+        # We also keep track of what the original base fallback type was supposed to be
+        # so we don't have to try and recompute it later
         self.original_str_expr = original_str_expr
+        self.original_str_fallback = original_str_fallback
 
     def accept(self, visitor: 'TypeVisitor[T]') -> T:
         return visitor.visit_unbound_type(self)
@@ -284,13 +295,15 @@ def __eq__(self, other: object) -> bool:
         if not isinstance(other, UnboundType):
             return NotImplemented
         return (self.name == other.name and self.optional == other.optional and
-                self.args == other.args and self.original_str_expr == other.original_str_expr)
+                self.args == other.args and self.original_str_expr == other.original_str_expr and
+                self.original_str_fallback == other.original_str_fallback)
 
     def serialize(self) -> JsonDict:
         return {'.class': 'UnboundType',
                 'name': self.name,
                 'args': [a.serialize() for a in self.args],
                 'expr': self.original_str_expr,
+                'expr_fallback': self.original_str_fallback,
                 }
 
     @classmethod
@@ -298,7 +311,9 @@ def deserialize(cls, data: JsonDict) -> 'UnboundType':
         assert data['.class'] == 'UnboundType'
         return UnboundType(data['name'],
                            [deserialize_type(a) for a in data['args']],
-                           original_str_expr=data['expr'])
+                           original_str_expr=data['expr'],
+                           original_str_fallback=data['expr_fallback'],
+                           )
 
 
 class CallableArgument(Type):
@@ -1368,6 +1383,29 @@ def __eq__(self, other: object) -> bool:
         else:
             return NotImplemented
 
+    def value_repr(self) -> str:
+        """Returns the string representation of the underlying type.
+
+        This function is almost equivalent to running `repr(self.value)`,
+        except it includes some additional logic to correctly handle cases
+        where the value is a string, byte string, or a unicode string.
+        """
+        raw = repr(self.value)
+        fallback_name = self.fallback.type.fullname()
+        if fallback_name == 'builtins.bytes':
+            # Note: 'builtins.bytes' only appears in Python 3, so we want to
+            # explicitly prefix with a "b"
+            return 'b' + raw
+        elif fallback_name == 'builtins.unicode':
+            # Similarly, 'builtins.unicode' only appears in Python 2, where we also
+            # want to explicitly prefix
+            return 'u' + raw
+        else:
+            # 'builtins.str' could mean either depending on context, but either way
+            # we don't prefix: it's the "native" string. And of course, if value is
+            # some other type, we just return that string repr directly.
+            return raw
+
     def serialize(self) -> Union[JsonDict, str]:
         return {
             '.class': 'LiteralType',
@@ -1838,7 +1876,7 @@ def visit_raw_literal_type(self, t: RawLiteralType) -> str:
         return repr(t.value)
 
     def visit_literal_type(self, t: LiteralType) -> str:
-        return 'Literal[{}]'.format(repr(t.value))
+        return 'Literal[{}]'.format(t.value_repr())
 
     def visit_star_type(self, t: StarType) -> str:
         s = t.type.accept(self)
diff --git a/test-data/unit/check-literal.test b/test-data/unit/check-literal.test
index 25cea27e8947..41854f64f066 100644
--- a/test-data/unit/check-literal.test
+++ b/test-data/unit/check-literal.test
@@ -215,6 +215,441 @@ reveal_type(expr_com_6)  # E: Revealed type is 'Literal['"foo"']'
 [builtins fixtures/bool.pyi]
 [out]
 
+[case testLiteralMixingUnicodeAndBytesPython3]
+from typing_extensions import Literal
+
+a_ann: Literal[u"foo"]
+b_ann: Literal["foo"]
+c_ann: Literal[b"foo"]
+
+a_hint = u"foo"  # type: Literal[u"foo"]
+b_hint = "foo"   # type: Literal["foo"]
+c_hint = b"foo"  # type: Literal[b"foo"]
+
+AAlias = Literal[u"foo"]
+BAlias = Literal["foo"]
+CAlias = Literal[b"foo"]
+a_alias: AAlias
+b_alias: BAlias
+c_alias: CAlias
+
+def accepts_str_1(x: Literal[u"foo"]) -> None: pass
+def accepts_str_2(x: Literal["foo"]) -> None: pass
+def accepts_bytes(x: Literal[b"foo"]) -> None: pass
+
+reveal_type(a_ann)      # E: Revealed type is 'Literal['foo']'
+reveal_type(b_ann)      # E: Revealed type is 'Literal['foo']'
+reveal_type(c_ann)      # E: Revealed type is 'Literal[b'foo']'
+reveal_type(a_hint)     # E: Revealed type is 'Literal['foo']'
+reveal_type(b_hint)     # E: Revealed type is 'Literal['foo']'
+reveal_type(c_hint)     # E: Revealed type is 'Literal[b'foo']'
+reveal_type(a_alias)    # E: Revealed type is 'Literal['foo']'
+reveal_type(b_alias)    # E: Revealed type is 'Literal['foo']'
+reveal_type(c_alias)    # E: Revealed type is 'Literal[b'foo']'
+
+accepts_str_1(a_ann)
+accepts_str_1(b_ann)
+accepts_str_1(c_ann)    # E: Argument 1 to "accepts_str_1" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+accepts_str_1(a_hint)
+accepts_str_1(b_hint)
+accepts_str_1(c_hint)   # E: Argument 1 to "accepts_str_1" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+accepts_str_1(a_alias)
+accepts_str_1(b_alias)
+accepts_str_1(c_alias)  # E: Argument 1 to "accepts_str_1" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+
+accepts_str_2(a_ann)
+accepts_str_2(b_ann)
+accepts_str_2(c_ann)    # E: Argument 1 to "accepts_str_2" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+accepts_str_2(a_hint)
+accepts_str_2(b_hint)
+accepts_str_2(c_hint)   # E: Argument 1 to "accepts_str_2" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+accepts_str_2(a_alias)
+accepts_str_2(b_alias)
+accepts_str_2(c_alias)  # E: Argument 1 to "accepts_str_2" has incompatible type "Literal[b'foo']"; expected "Literal['foo']"
+
+accepts_bytes(a_ann)    # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(b_ann)    # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(c_ann)
+accepts_bytes(a_hint)   # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(b_hint)   # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(c_hint)
+accepts_bytes(a_alias)  # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(b_alias)  # E: Argument 1 to "accepts_bytes" has incompatible type "Literal['foo']"; expected "Literal[b'foo']"
+accepts_bytes(c_alias)
+[out]
+
+[case testLiteralMixingUnicodeAndBytesPython2]
+# flags: --python-version 2.7
+from typing_extensions import Literal
+
+a_hint = u"foo"  # type: Literal[u"foo"]
+b_hint = "foo"   # type: Literal["foo"]
+c_hint = b"foo"  # type: Literal[b"foo"]
+
+AAlias = Literal[u"foo"]
+BAlias = Literal["foo"]
+CAlias = Literal[b"foo"]
+a_alias = u"foo"  # type: AAlias
+b_alias = "foo"   # type: BAlias
+c_alias = b"foo"  # type: CAlias
+
+def accepts_unicode(x):
+    # type: (Literal[u"foo"]) -> None
+    pass
+def accepts_bytes_1(x):
+    # type: (Literal["foo"]) -> None
+    pass
+def accepts_bytes_2(x):
+    # type: (Literal[b"foo"]) -> None
+    pass
+
+reveal_type(a_hint)       # E: Revealed type is 'Literal[u'foo']'
+reveal_type(b_hint)       # E: Revealed type is 'Literal['foo']'
+reveal_type(c_hint)       # E: Revealed type is 'Literal['foo']'
+reveal_type(a_alias)      # E: Revealed type is 'Literal[u'foo']'
+reveal_type(b_alias)      # E: Revealed type is 'Literal['foo']'
+reveal_type(c_alias)      # E: Revealed type is 'Literal['foo']'
+
+accepts_unicode(a_hint)
+accepts_unicode(b_hint)   # E: Argument 1 to "accepts_unicode" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+accepts_unicode(c_hint)   # E: Argument 1 to "accepts_unicode" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+accepts_unicode(a_alias)
+accepts_unicode(b_alias)  # E: Argument 1 to "accepts_unicode" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+accepts_unicode(c_alias)  # E: Argument 1 to "accepts_unicode" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+
+accepts_bytes_1(a_hint)   # E: Argument 1 to "accepts_bytes_1" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes_1(b_hint)
+accepts_bytes_1(c_hint)
+accepts_bytes_1(a_alias)  # E: Argument 1 to "accepts_bytes_1" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes_1(b_alias)
+accepts_bytes_1(c_alias)
+
+accepts_bytes_2(a_hint)   # E: Argument 1 to "accepts_bytes_2" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes_2(b_hint)
+accepts_bytes_2(c_hint)
+accepts_bytes_2(a_alias)  # E: Argument 1 to "accepts_bytes_2" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes_2(b_alias)
+accepts_bytes_2(c_alias)
+[builtins fixtures/primitives.pyi]
+[out]
+
+[case testLiteralMixingUnicodeAndBytesPython2UnicodeLiterals]
+# flags: --python-version 2.7
+from __future__ import unicode_literals
+from typing_extensions import Literal
+
+a_hint = u"foo"  # type: Literal[u"foo"]
+b_hint = "foo"   # type: Literal["foo"]
+c_hint = b"foo"  # type: Literal[b"foo"]
+
+AAlias = Literal[u"foo"]
+BAlias = Literal["foo"]
+CAlias = Literal[b"foo"]
+a_alias = u"foo"  # type: AAlias
+b_alias = "foo"   # type: BAlias
+c_alias = b"foo"  # type: CAlias
+
+def accepts_unicode_1(x):
+    # type: (Literal[u"foo"]) -> None
+    pass
+def accepts_unicode_2(x):
+    # type: (Literal["foo"]) -> None
+    pass
+def accepts_bytes(x):
+    # type: (Literal[b"foo"]) -> None
+    pass
+
+reveal_type(a_hint)       # E: Revealed type is 'Literal[u'foo']'
+reveal_type(b_hint)       # E: Revealed type is 'Literal[u'foo']'
+reveal_type(c_hint)       # E: Revealed type is 'Literal['foo']'
+reveal_type(a_alias)      # E: Revealed type is 'Literal[u'foo']'
+reveal_type(b_alias)      # E: Revealed type is 'Literal[u'foo']'
+reveal_type(c_alias)      # E: Revealed type is 'Literal['foo']'
+
+accepts_unicode_1(a_hint)
+accepts_unicode_1(b_hint)
+accepts_unicode_1(c_hint)   # E: Argument 1 to "accepts_unicode_1" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+accepts_unicode_1(a_alias)
+accepts_unicode_1(b_alias)
+accepts_unicode_1(c_alias)  # E: Argument 1 to "accepts_unicode_1" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+
+accepts_unicode_2(a_hint)
+accepts_unicode_2(b_hint)
+accepts_unicode_2(c_hint)   # E: Argument 1 to "accepts_unicode_2" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+accepts_unicode_2(a_alias)
+accepts_unicode_2(b_alias)
+accepts_unicode_2(c_alias)  # E: Argument 1 to "accepts_unicode_2" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+
+accepts_bytes(a_hint)       # E: Argument 1 to "accepts_bytes" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes(b_hint)       # E: Argument 1 to "accepts_bytes" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes(c_hint)
+accepts_bytes(a_alias)      # E: Argument 1 to "accepts_bytes" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes(b_alias)      # E: Argument 1 to "accepts_bytes" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+accepts_bytes(c_alias)
+[builtins fixtures/primitives.pyi]
+[out]
+
+[case testLiteralMixingUnicodeAndBytesPython3ForwardStrings]
+from typing import TypeVar, Generic
+from typing_extensions import Literal
+
+a_unicode_wrapper: u"Literal[u'foo']"
+b_unicode_wrapper: u"Literal['foo']"
+c_unicode_wrapper: u"Literal[b'foo']"
+
+a_str_wrapper: "Literal[u'foo']"
+b_str_wrapper: "Literal['foo']"
+c_str_wrapper: "Literal[b'foo']"
+
+# In Python 3, forward references MUST be str, not bytes
+a_bytes_wrapper: b"Literal[u'foo']"  # E: Invalid type: syntax error in type comment
+b_bytes_wrapper: b"Literal['foo']"   # E: Invalid type: syntax error in type comment
+c_bytes_wrapper: b"Literal[b'foo']"  # E: Invalid type: syntax error in type comment
+
+reveal_type(a_unicode_wrapper)  # E: Revealed type is 'Literal['foo']'
+reveal_type(b_unicode_wrapper)  # E: Revealed type is 'Literal['foo']'
+reveal_type(c_unicode_wrapper)  # E: Revealed type is 'Literal[b'foo']'
+
+reveal_type(a_str_wrapper)      # E: Revealed type is 'Literal['foo']'
+reveal_type(b_str_wrapper)      # E: Revealed type is 'Literal['foo']'
+reveal_type(c_str_wrapper)      # E: Revealed type is 'Literal[b'foo']'
+
+T = TypeVar('T')
+class Wrap(Generic[T]): pass
+
+AUnicodeWrapperAlias = Wrap[u"Literal[u'foo']"]
+BUnicodeWrapperAlias = Wrap[u"Literal['foo']"]
+CUnicodeWrapperAlias = Wrap[u"Literal[b'foo']"]
+a_unicode_wrapper_alias: AUnicodeWrapperAlias
+b_unicode_wrapper_alias: BUnicodeWrapperAlias
+c_unicode_wrapper_alias: CUnicodeWrapperAlias
+
+AStrWrapperAlias = Wrap["Literal[u'foo']"]
+BStrWrapperAlias = Wrap["Literal['foo']"]
+CStrWrapperAlias = Wrap["Literal[b'foo']"]
+a_str_wrapper_alias: AStrWrapperAlias
+b_str_wrapper_alias: BStrWrapperAlias
+c_str_wrapper_alias: CStrWrapperAlias
+
+ABytesWrapperAlias = Wrap[b"Literal[u'foo']"]
+BBytesWrapperAlias = Wrap[b"Literal['foo']"]
+CBytesWrapperAlias = Wrap[b"Literal[b'foo']"]
+a_bytes_wrapper_alias: ABytesWrapperAlias
+b_bytes_wrapper_alias: BBytesWrapperAlias
+c_bytes_wrapper_alias: CBytesWrapperAlias
+
+# In Python 3, we assume that Literal['foo'] and Literal[u'foo'] are always
+# equivalent, no matter what.
+reveal_type(a_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(b_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal[b'foo']]'
+
+reveal_type(a_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(b_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal[b'foo']]'
+
+reveal_type(a_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(b_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal[b'foo']]'
+[out]
+
+[case testLiteralMixingUnicodeAndBytesPython2ForwardStrings]
+# flags: --python-version 2.7
+from typing import TypeVar, Generic
+from typing_extensions import Literal
+
+T = TypeVar('T')
+class Wrap(Generic[T]): pass
+
+AUnicodeWrapperAlias = Wrap[u"Literal[u'foo']"]
+BUnicodeWrapperAlias = Wrap[u"Literal['foo']"]
+CUnicodeWrapperAlias = Wrap[u"Literal[b'foo']"]
+a_unicode_wrapper_alias = Wrap()  # type: AUnicodeWrapperAlias
+b_unicode_wrapper_alias = Wrap()  # type: BUnicodeWrapperAlias
+c_unicode_wrapper_alias = Wrap()  # type: CUnicodeWrapperAlias
+
+AStrWrapperAlias = Wrap["Literal[u'foo']"]
+BStrWrapperAlias = Wrap["Literal['foo']"]
+CStrWrapperAlias = Wrap["Literal[b'foo']"]
+a_str_wrapper_alias = Wrap()  # type: AStrWrapperAlias
+b_str_wrapper_alias = Wrap()  # type: BStrWrapperAlias
+c_str_wrapper_alias = Wrap()  # type: CStrWrapperAlias
+
+ABytesWrapperAlias = Wrap[b"Literal[u'foo']"]
+BBytesWrapperAlias = Wrap[b"Literal['foo']"]
+CBytesWrapperAlias = Wrap[b"Literal[b'foo']"]
+a_bytes_wrapper_alias = Wrap()  # type: ABytesWrapperAlias
+b_bytes_wrapper_alias = Wrap()  # type: BBytesWrapperAlias
+c_bytes_wrapper_alias = Wrap()  # type: CBytesWrapperAlias
+
+# Unlike Python 3, the exact meaning of Literal['foo'] is "inherited" from the "outer"
+# string. For example, the "outer" string is unicode in the first example here. So
+# we treat Literal['foo'] as the same as Literal[u'foo'].
+reveal_type(a_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(c_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+
+# However, for both of these examples, the "outer" string is bytes, so we don't treat
+# Literal['foo'] as a unicode Literal.
+reveal_type(a_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+
+reveal_type(a_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+[out]
+
+[case testLiteralMixingUnicodeAndBytesPython2ForwardStringsUnicodeLiterals]
+# flags: --python-version 2.7
+from __future__ import unicode_literals
+from typing import TypeVar, Generic
+from typing_extensions import Literal
+
+T = TypeVar('T')
+class Wrap(Generic[T]): pass
+
+AUnicodeWrapperAlias = Wrap[u"Literal[u'foo']"]
+BUnicodeWrapperAlias = Wrap[u"Literal['foo']"]
+CUnicodeWrapperAlias = Wrap[u"Literal[b'foo']"]
+a_unicode_wrapper_alias = Wrap()  # type: AUnicodeWrapperAlias
+b_unicode_wrapper_alias = Wrap()  # type: BUnicodeWrapperAlias
+c_unicode_wrapper_alias = Wrap()  # type: CUnicodeWrapperAlias
+
+AStrWrapperAlias = Wrap["Literal[u'foo']"]
+BStrWrapperAlias = Wrap["Literal['foo']"]
+CStrWrapperAlias = Wrap["Literal[b'foo']"]
+a_str_wrapper_alias = Wrap()  # type: AStrWrapperAlias
+b_str_wrapper_alias = Wrap()  # type: BStrWrapperAlias
+c_str_wrapper_alias = Wrap()  # type: CStrWrapperAlias
+
+ABytesWrapperAlias = Wrap[b"Literal[u'foo']"]
+BBytesWrapperAlias = Wrap[b"Literal['foo']"]
+CBytesWrapperAlias = Wrap[b"Literal[b'foo']"]
+a_bytes_wrapper_alias = Wrap()  # type: ABytesWrapperAlias
+b_bytes_wrapper_alias = Wrap()  # type: BBytesWrapperAlias
+c_bytes_wrapper_alias = Wrap()  # type: CBytesWrapperAlias
+
+# This example is almost identical to the previous one, except that we're using
+# unicode literals. The first and last examples remain the same, but the middle
+# one changes:
+reveal_type(a_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(c_unicode_wrapper_alias)  # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+
+# Since unicode_literals is enabled, the "outer" string in Wrap["Literal['foo']"] is now
+# a unicode string, so we end up treating Literal['foo'] as the same as Literal[u'foo'].
+reveal_type(a_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(c_str_wrapper_alias)      # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+
+reveal_type(a_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal[u'foo']]'
+reveal_type(b_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+reveal_type(c_bytes_wrapper_alias)    # E: Revealed type is '__main__.Wrap[Literal['foo']]'
+[out]
+
+[case testLiteralMixingUnicodeAndBytesInconsistentUnicodeLiterals]
+# flags: --python-version 2.7
+import mod_unicode as u
+import mod_bytes as b
+
+reveal_type(u.func)   # E: Revealed type is 'def (x: Literal[u'foo'])'
+reveal_type(u.var)    # E: Revealed type is 'Literal[u'foo']'
+reveal_type(b.func)   # E: Revealed type is 'def (x: Literal['foo'])'
+reveal_type(b.var)    # E: Revealed type is 'Literal['foo']'
+
+from_u = u"foo"       # type: u.Alias
+from_b = "foo"        # type: b.Alias
+
+u.func(u.var)
+u.func(from_u)
+u.func(b.var)         # E: Argument 1 to "func" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+u.func(from_b)        # E: Argument 1 to "func" has incompatible type "Literal['foo']"; expected "Literal[u'foo']"
+
+b.func(u.var)         # E: Argument 1 to "func" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+b.func(from_u)        # E: Argument 1 to "func" has incompatible type "Literal[u'foo']"; expected "Literal['foo']"
+b.func(b.var)
+b.func(from_b)
+
+[file mod_unicode.py]
+from __future__ import unicode_literals
+from typing_extensions import Literal
+
+def func(x):
+    # type: (Literal["foo"]) -> None
+    pass
+
+Alias = Literal["foo"]
+var = "foo"  # type: Alias
+
+[file mod_bytes.py]
+from typing_extensions import Literal
+
+def func(x):
+    # type: (Literal["foo"]) -> None
+    pass
+
+Alias = Literal["foo"]
+var = "foo"  # type: Alias
+[out]
+
+[case testLiteralUnicodeWeirdCharacters]
+from typing import Any
+from typing_extensions import Literal
+
+a1: Literal["\x00\xAC\x62 \u2227 \u03bb(p)"]
+b1: Literal["\x00¬b ∧ λ(p)"]
+c1: Literal["¬b ∧ λ(p)"]
+d1: Literal["\U0001F600"]
+e1: Literal["😀"]
+
+Alias1 = Literal["\x00\xAC\x62 \u2227 \u03bb(p)"]
+Alias2 = Literal["\x00¬b ∧ λ(p)"]
+Alias3 = Literal["¬b ∧ λ(p)"]
+Alias4 = Literal["\U0001F600"]
+Alias5 = Literal["😀"]
+a2: Alias1
+b2: Alias2
+c2: Alias3
+d2: Alias4
+e2: Alias5
+
+blah: Any
+a3 = blah  # type: Literal["\x00\xAC\x62 \u2227 \u03bb(p)"]
+b3 = blah  # type: Literal["\x00¬b ∧ λ(p)"]
+c3 = blah  # type: Literal["¬b ∧ λ(p)"]
+d3 = blah  # type: Literal["\U0001F600"]
+e3 = blah  # type: Literal["😀"]
+
+reveal_type(a1)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(b1)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(c1)  # E: Revealed type is 'Literal['¬b ∧ λ(p)']'
+reveal_type(d1)  # E: Revealed type is 'Literal['😀']'
+reveal_type(e1)  # E: Revealed type is 'Literal['😀']'
+
+reveal_type(a2)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(b2)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(c2)  # E: Revealed type is 'Literal['¬b ∧ λ(p)']'
+reveal_type(d2)  # E: Revealed type is 'Literal['😀']'
+reveal_type(e2)  # E: Revealed type is 'Literal['😀']'
+
+reveal_type(a3)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(b3)  # E: Revealed type is 'Literal['\x00¬b ∧ λ(p)']'
+reveal_type(c3)  # E: Revealed type is 'Literal['¬b ∧ λ(p)']'
+reveal_type(d3)  # E: Revealed type is 'Literal['😀']'
+reveal_type(e3)  # E: Revealed type is 'Literal['😀']'
+
+a1 = b1
+a1 = c1  # E: Incompatible types in assignment (expression has type "Literal['¬b ∧ λ(p)']", variable has type "Literal['\x00¬b ∧ λ(p)']")
+a1 = a2
+a1 = b2
+a1 = c2  # E: Incompatible types in assignment (expression has type "Literal['¬b ∧ λ(p)']", variable has type "Literal['\x00¬b ∧ λ(p)']")
+a1 = a3
+a1 = b3
+a1 = c3  # E: Incompatible types in assignment (expression has type "Literal['¬b ∧ λ(p)']", variable has type "Literal['\x00¬b ∧ λ(p)']")
+
+[out skip-path-normalization]
+
 [case testLiteralRenamingImportWorks]
 from typing_extensions import Literal as Foo