@@ -580,43 +580,43 @@ fn consume_quoted_string<'a>(tokenizer: &mut Tokenizer<'a>, single_quote: bool)
         if tokenizer.is_eof() {
             return Ok(Borrowed(tokenizer.slice_from(start_pos)))
         }
-        match tokenizer.next_char() {
-            '"' if !single_quote => {
+        match tokenizer.next_byte_unchecked() {
+            b'"' if !single_quote => {
                 let value = tokenizer.slice_from(start_pos);
                 tokenizer.advance(1);
                 return Ok(Borrowed(value))
             }
-            '\'' if single_quote => {
+            b'\'' if single_quote => {
                 let value = tokenizer.slice_from(start_pos);
                 tokenizer.advance(1);
                 return Ok(Borrowed(value))
             }
-            '\\' | '\0' => {
+            b'\\' | b'\0' => {
                 string = tokenizer.slice_from(start_pos).to_owned();
                 break
             }
-            '\n' | '\r' | '\x0C' => return Err(()),
+            b'\n' | b'\r' | b'\x0C' => return Err(()),
             _ => {
-                tokenizer.consume_char();
+                tokenizer.consume_byte();
             }
         }
     }

     while !tokenizer.is_eof() {
-        if matches!(tokenizer.next_char(), '\n' | '\r' | '\x0C') {
+        if matches!(tokenizer.next_byte_unchecked(), b'\n' | b'\r' | b'\x0C') {
             return Err(());
         }
         match tokenizer.consume_char() {
             '"' if !single_quote => break,
             '\'' if single_quote => break,
             '\\' => {
                 if !tokenizer.is_eof() {
                     match tokenizer.next_char() {
-                    match tokenizer.next_char() {
+                    match tokenizer.next_byte_unchecked() {
                         // Escaped newline
-                        '\n' | '\x0C' => tokenizer.advance(1),
-                        '\r' => {
+                        b'\n' | b'\x0C' => tokenizer.advance(1),
+                        b'\r' => {
                             tokenizer.advance(1);
-                            if !tokenizer.is_eof() && tokenizer.next_char() == '\n' {
+                            if tokenizer.next_byte() == Some(b'\n') {
                                 tokenizer.advance(1);
                             }
                         }
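
Note: the hunk above relies on byte-level accessors (next_byte_unchecked, next_byte, consume_byte) that are not shown in this hunk. Below is a minimal sketch of how such helpers could look, assuming the tokenizer keeps a byte offset into a &str; the field names input and position are assumptions for illustration, not the crate's actual layout.

// Sketch only: a plausible byte-oriented tokenizer core matching the calls
// made in the diff above. The real crate's internals may differ.
struct Tokenizer<'a> {
    input: &'a str,
    position: usize, // byte offset into `input`
}

impl<'a> Tokenizer<'a> {
    fn is_eof(&self) -> bool {
        self.position >= self.input.len()
    }

    // Callers check !is_eof() first, so plain indexing is in bounds and no
    // UTF-8 decoding is needed.
    fn next_byte_unchecked(&self) -> u8 {
        self.input.as_bytes()[self.position]
    }

    // Checked variant, used above for the escaped CR-LF case.
    fn next_byte(&self) -> Option<u8> {
        self.input.as_bytes().get(self.position).copied()
    }

    // Step over one byte. The string code above only slices at positions of
    // ASCII bytes ('"', '\'', '\\'), so slices stay on UTF-8 boundaries even
    // if this lands inside a multi-byte character in between.
    fn consume_byte(&mut self) -> u8 {
        let byte = self.next_byte_unchecked();
        self.position += 1;
        byte
    }

    fn advance(&mut self, n: usize) {
        self.position += n;
    }

    fn slice_from(&self, start: usize) -> &'a str {
        let input: &'a str = self.input;
        &input[start..self.position]
    }
}

The point of the change appears to be that every byte the hot loop compares against is ASCII, so matching on raw bytes avoids decoding a full char on each iteration.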