Add a tokenizer for URLPatterns (#36362)

Not many new tests start to pass yet, because the actual parser is still
missing; for now we only pass tests for invalid inputs. The parser will be
added in the next PR.
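
As a rough illustration of what the tokenizer produces (informal notation, not
actual test output), the pattern string `/books/:id(\d+)` tokenizes under the
"strict" policy into:

```
char(/) char(b) char(o) char(o) char(k) char(s) char(/) name(id) regexp(\d+) end
```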

This is part 3 of upstreaming the changes in
https://github.com/simonwuelker/servo/tree/urlpattern

---------

Signed-off-by: Simon Wülker <simon.wuelker@arcor.de>
Authored by Simon Wülker on 2025-04-07 08:25:32 +02:00; committed by GitHub
parent d1243a1867
commit 0a4174ad0e
3 changed files with 543 additions and 112 deletions


@@ -524,11 +524,17 @@ fn parse_a_pattern_string(
options: Options,
encoding_callback: EncodingCallback,
) -> Fallible<Vec<Part>> {
// FIXME: Implement this algorithm
let _ = input;
let _ = options;
let _ = encoding_callback;
// Step 1. Let parser be a new pattern parser whose encoding callback is encoding callback and
// segment wildcard regexp is the result of running generate a segment wildcard regexp given options.
let mut parser = PatternParser::new(
generate_a_segment_wildcard_regexp(options),
encoding_callback,
);
// Step 2. Set parser's token list to the result of running tokenize given input and "strict".
parser.token_list = tokenize(input, TokenizePolicy::Strict)?;
// TODO: Implement the rest of this algorithm
Ok(vec![])
}
@@ -980,6 +986,69 @@ fn process_a_url_pattern_init(
/// <https://urlpattern.spec.whatwg.org/#encoding-callback>
type EncodingCallback = Box<dyn Fn(&str) -> Fallible<String>>;
/// <https://urlpattern.spec.whatwg.org/#token>
#[derive(Clone, Copy, Debug)]
#[allow(dead_code)] // index isn't used yet, because constructor strings aren't parsed
struct Token<'a> {
/// <https://urlpattern.spec.whatwg.org/#token-index>
index: usize,
/// <https://urlpattern.spec.whatwg.org/#token-value>
value: &'a str,
/// <https://urlpattern.spec.whatwg.org/#token-type>
token_type: TokenType,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum TokenType {
/// <https://urlpattern.spec.whatwg.org/#token-type-open>
Open,
/// <https://urlpattern.spec.whatwg.org/#token-type-close>
Close,
/// <https://urlpattern.spec.whatwg.org/#token-type-regexp>
Regexp,
/// <https://urlpattern.spec.whatwg.org/#token-type-name>
Name,
/// <https://urlpattern.spec.whatwg.org/#token-type-char>
Char,
/// <https://urlpattern.spec.whatwg.org/#token-type-escaped-char>
EscapedChar,
/// <https://urlpattern.spec.whatwg.org/#token-type-other-modifier>
OtherModifier,
/// <https://urlpattern.spec.whatwg.org/#token-type-asterisk>
Asterisk,
/// <https://urlpattern.spec.whatwg.org/#token-type-end>
End,
/// <https://urlpattern.spec.whatwg.org/#token-type-invalid-char>
InvalidChar,
}
/// <https://urlpattern.spec.whatwg.org/#pattern-parser>
struct PatternParser<'a> {
/// <https://urlpattern.spec.whatwg.org/#pattern-parser-token-list>
token_list: Vec<Token<'a>>,
}
/// <https://urlpattern.spec.whatwg.org/#tokenize-policy>
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum TokenizePolicy {
/// <https://urlpattern.spec.whatwg.org/#tokenize-policy-strict>
Strict,
/// <https://urlpattern.spec.whatwg.org/#tokenize-policy-lenient>
Lenient,
}
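// Informative example: tokenizing the single code point "(" fails under the
// "strict" policy (the group is never closed), while "lenient" degrades the
// same input into an "invalid-char" token followed by the usual "end" token.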
// FIXME: Deduplicate this with the url crate
/// <https://url.spec.whatwg.org/#special-scheme>
fn default_port_for_special_scheme(scheme: &str) -> Option<u16> {
@@ -1055,6 +1124,476 @@ enum PatternInitType {
Url,
}
impl PatternParser<'_> {
fn new(segment_wildcard_regexp: String, encoding_callback: EncodingCallback) -> Self {
// These parameters will be stored and used once the parser is implemented.
_ = segment_wildcard_regexp;
_ = encoding_callback;
Self { token_list: vec![] }
}
}
/// <https://urlpattern.spec.whatwg.org/#tokenizer>
struct Tokenizer<'a> {
input: &'a str,
/// <https://urlpattern.spec.whatwg.org/#tokenizer-policy>
policy: TokenizePolicy,
/// <https://urlpattern.spec.whatwg.org/#tokenizer-index>
///
/// Note that we deviate from the spec and index bytes, not code points.
index: usize,
/// <https://urlpattern.spec.whatwg.org/#tokenizer-next-index>
///
/// Note that we deviate from the spec and index bytes, not code points.
next_index: usize,
/// <https://urlpattern.spec.whatwg.org/#tokenizer-token-list>
token_list: Vec<Token<'a>>,
/// <https://urlpattern.spec.whatwg.org/#tokenizer-code-point>
code_point: char,
}
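// Informative note: because `index` and `next_index` are byte offsets, a
// multi-byte code point such as "é" (two bytes in UTF-8) advances `next_index`
// by 2 rather than 1. Every position passed around below is produced by the
// tokenizer itself, so slicing the input at these offsets always lands on
// UTF-8 code point boundaries.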
/// <https://urlpattern.spec.whatwg.org/#tokenize>
fn tokenize(input: &str, policy: TokenizePolicy) -> Fallible<Vec<Token>> {
// Step 1. Let tokenizer be a new tokenizer.
// Step 2. Set tokenizer's input to input.
// Step 3. Set tokenizer's policy to policy.
let mut tokenizer = Tokenizer {
input,
policy,
index: 0,
next_index: 0,
token_list: vec![],
code_point: char::MIN,
};
// Step 4. While tokenizer's index is less than tokenizer's input's code point length:
while tokenizer.index < tokenizer.input.len() {
// Step 4.1 Run seek and get the next code point given tokenizer and tokenizer's index.
tokenizer.seek_and_get_the_next_code_point(tokenizer.index);
match tokenizer.code_point {
// Step 4.2 If tokenizer's code point is U+002A (*):
'*' => {
// Step 4.2.1 Run add a token with default position and length given tokenizer and "asterisk".
tokenizer.add_a_token_with_default_position_and_length(TokenType::Asterisk);
// Step 4.2.2 Continue.
continue;
},
// Step 4.3 If tokenizer's code point is U+002B (+) or U+003F (?):
'+' | '?' => {
// Step 4.3.1 Run add a token with default position and length given tokenizer and "other-modifier".
tokenizer.add_a_token_with_default_position_and_length(TokenType::OtherModifier);
// Step 4.3.2 Continue.
continue;
},
// Step 4.4 If tokenizer's code point is U+005C (\):
'\\' => {
// Step 4.4.1 If tokenizer's index is equal to tokenizer's input's code point length - 1:
if tokenizer.is_done() {
// Step 4.4.1.1 Run process a tokenizing error given tokenizer, tokenizer's next index,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(tokenizer.next_index, tokenizer.index)?;
// Step 4.4.1.2 Continue.
continue;
}
// Step 4.4.2 Let escaped index be tokenizer's next index.
let escaped_index = tokenizer.next_index;
// Step 4.4.3 Run get the next code point given tokenizer.
tokenizer.get_the_next_code_point();
// Step 4.4.4 Run add a token with default length given tokenizer, "escaped-char",
// tokenizer's next index, and escaped index.
tokenizer.add_a_token_with_default_length(
TokenType::EscapedChar,
tokenizer.next_index,
escaped_index,
);
// Step 4.4.5 Continue.
continue;
},
// Step 4.5 If tokenizer's code point is U+007B ({):
'{' => {
// Step 4.5.1 Run add a token with default position and length given tokenizer and "open".
tokenizer.add_a_token_with_default_position_and_length(TokenType::Open);
// Step 4.5.2 Continue.
continue;
},
// Step 4.6 If tokenizer's code point is U+007D (}):
'}' => {
// Step 4.6.1 Run add a token with default position and length given tokenizer and "close".
tokenizer.add_a_token_with_default_position_and_length(TokenType::Close);
// Step 4.6.2 Continue.
continue;
},
// Step 4.7 If tokenizer's code point is U+003A (:):
':' => {
// Step 4.7.1 Let name position be tokenizer's next index.
let mut name_position = tokenizer.next_index;
// Step 4.7.2 Let name start be name position.
let name_start = name_position;
// Step 4.7.3 While name position is less than tokenizer's input's code point length:
while name_position < tokenizer.input.len() {
// Step 4.7.3.1 Run seek and get the next code point given tokenizer and name position.
tokenizer.seek_and_get_the_next_code_point(name_position);
// Step 4.7.3.2 Let first code point be true if name position equals name start
// and false otherwise.
let first_code_point = name_position == name_start;
// Step 4.7.3.3 Let valid code point be the result of running is a valid name
// code point given tokenizer's code point and first code point.
let valid_code_point =
is_a_valid_name_code_point(tokenizer.code_point, first_code_point);
// Step 4.7.3.4 If valid code point is false, break.
if !valid_code_point {
break;
}
// Step 4.7.3.5 Set name position to tokenizer's next index.
name_position = tokenizer.next_index;
}
// Step 4.7.4 If name position is less than or equal to name start:
if name_position <= name_start {
// Step 4.7.4.1 Run process a tokenizing error given tokenizer, name start, and tokenizer's index.
tokenizer.process_a_tokenizing_error(name_start, tokenizer.index)?;
// Step 4.7.4.2 Continue.
continue;
}
// Step 4.7.5 Run add a token with default length given tokenizer, "name", name position,
// and name start.
tokenizer.add_a_token_with_default_length(
TokenType::Name,
name_position,
name_start,
);
// Step 4.7.6 Continue.
continue;
},
// Step 4.8 If tokenizer's code point is U+0028 (():
'(' => {
// Step 4.8.1 Let depth be 1.
let mut depth = 1;
// Step 4.8.2 Let regexp position be tokenizer's next index.
let mut regexp_position = tokenizer.next_index;
// Step 4.8.3 Let regexp start be regexp position.
let regexp_start = regexp_position;
// Step 4.8.4 Let error be false.
let mut error = false;
// Step 4.8.5 While regexp position is less than tokenizer's input's code point length:
while regexp_position < tokenizer.input.len() {
// Step 4.8.5.1 Run seek and get the next code point given tokenizer and regexp position.
tokenizer.seek_and_get_the_next_code_point(regexp_position);
// Step 4.8.5.2 If tokenizer's code point is not an ASCII code point:
if !tokenizer.code_point.is_ascii() {
// Step 4.8.5.2.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.2.2 Set error to true.
error = true;
// Step 4.8.5.2.3 Break.
break;
}
// Step 4.8.5.3 If regexp position equals regexp start and tokenizer's code point is U+003F (?):
if regexp_position == regexp_start && tokenizer.code_point == '?' {
// Step 4.8.5.3.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.3.2 Set error to true.
error = true;
// Step 4.8.5.3.3 Break.
break;
}
// Step 4.8.5.4 If tokenizer's code point is U+005C (\):
if tokenizer.code_point == '\\' {
// Step 4.8.5.4.1 If regexp position equals tokenizer's input's code point length - 1:
if tokenizer.is_last_character(regexp_position) {
// Step 4.8.5.4.1.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.4.1.2 Set error to true.
error = true;
// Step 4.8.5.4.1.3 Break.
break;
}
// Step 4.8.5.4.2 Run get the next code point given tokenizer.
tokenizer.get_the_next_code_point();
// Step 4.8.5.4.3 If tokenizer's code point is not an ASCII code point:
if !tokenizer.code_point.is_ascii() {
// Step 4.8.5.4.3.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.4.3.2 Set error to true.
error = true;
// Step 4.8.5.4.3.3 Break.
break;
}
// Step 4.8.5.4.4 Set regexp position to tokenizer's next index.
regexp_position = tokenizer.next_index;
// Step 4.8.5.4.5 Continue.
continue;
}
// Step 4.8.5.5 If tokenizer's code point is U+0029 ()):
if tokenizer.code_point == ')' {
// Step 4.8.5.5.1 Decrement depth by 1.
depth -= 1;
// Step 4.8.5.5.2 If depth is 0:
if depth == 0 {
// Step 4.8.5.5.2.1 Set regexp position to tokenizer's next index.
regexp_position = tokenizer.next_index;
// Step 4.8.5.5.2.2 Break.
break;
}
}
// Step 4.8.5.6 Otherwise if tokenizer's code point is U+0028 (():
else if tokenizer.code_point == '(' {
// Step 4.8.5.6.1 Increment depth by 1.
depth += 1;
// Step 4.8.5.6.2 If regexp position equals tokenizer's input's code point length - 1:
if tokenizer.is_last_character(regexp_position) {
// Step 4.8.5.6.2.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.6.2.2 Set error to true.
error = true;
// Step 4.8.5.6.2.3 Break.
break;
}
// Step 4.8.5.6.3 Let temporary position be tokenizer's next index.
let temporary_position = tokenizer.next_index;
// Step 4.8.5.6.4 Run get the next code point given tokenizer.
tokenizer.get_the_next_code_point();
// Step 4.8.5.6.5 If tokenizer's code point is not U+003F (?):
if tokenizer.code_point != '?' {
// Step 4.8.5.6.5.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.5.6.5.2 Set error to true.
error = true;
// Step 4.8.5.6.5.3 Break.
break;
}
// Step 4.8.5.6.6 Set tokenizer's next index to temporary position.
tokenizer.next_index = temporary_position;
}
// Step 4.8.5.7 Set regexp position to tokenizer's next index.
regexp_position = tokenizer.next_index;
}
// Step 4.8.6 If error is true continue.
if error {
continue;
}
// Step 4.8.7 If depth is not zero:
if depth != 0 {
// Step 4.8.7.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.7.2 Continue.
continue;
}
// Step 4.8.8 Let regexp length be regexp position - regexp start - 1.
let regexp_length = regexp_position - regexp_start - 1;
// Step 4.8.9 If regexp length is zero:
if regexp_length == 0 {
// Step 4.8.9.1 Run process a tokenizing error given tokenizer, regexp start,
// and tokenizer's index.
tokenizer.process_a_tokenizing_error(regexp_start, tokenizer.index)?;
// Step 4.8.9.2 Continue.
continue;
}
// Step 4.8.10 Run add a token given tokenizer, "regexp", regexp position,
// regexp start, and regexp length.
tokenizer.add_a_token(
TokenType::Regexp,
regexp_position,
regexp_start,
regexp_length,
);
// Step 4.8.11 Continue.
continue;
},
_ => {
// Step 4.9 Run add a token with default position and length given tokenizer and "char".
tokenizer.add_a_token_with_default_position_and_length(TokenType::Char);
},
}
}
// Step 5. Run add a token with default length given tokenizer, "end", tokenizer's index, and tokenizer's index.
tokenizer.add_a_token_with_default_length(TokenType::End, tokenizer.index, tokenizer.index);
// Step 6. Return tokenizer's token list.
Ok(tokenizer.token_list)
}
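// A minimal illustrative check, as a sketch only: it assumes the surrounding
// `Error` type implements `Debug` (so that `unwrap` compiles) and is not part
// of the actual change or the WPT suite.
#[cfg(test)]
mod tokenize_sketch {
    use super::{tokenize, TokenType, TokenizePolicy};

    #[test]
    fn tokenizes_a_simple_pattern() {
        let tokens = tokenize("/books/:id", TokenizePolicy::Strict).unwrap();
        // "/books/" becomes seven "char" tokens, ":id" a single "name" token,
        // and the stream is always terminated by an "end" token.
        assert_eq!(tokens.len(), 9);
        assert_eq!(tokens[7].token_type, TokenType::Name);
        assert_eq!(tokens[7].value, "id");
        assert_eq!(tokens[8].token_type, TokenType::End);
    }
}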
/// <https://urlpattern.spec.whatwg.org/#is-a-valid-name-code-point>
fn is_a_valid_name_code_point(code_point: char, first: bool) -> bool {
// FIXME: implement this check
_ = first;
code_point.is_alphabetic()
}
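// Informative: the spec defines a valid name code point in terms of ECMAScript
// identifiers: the first code point must match the IdentifierStart production
// (letters, "$", "_", ...) and subsequent ones IdentifierPart (which also
// admits digits). is_alphabetic is a stopgap that notably rejects "$", "_",
// and digits.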
impl Tokenizer<'_> {
fn is_last_character(&self, position: usize) -> bool {
self.input[position..].chars().count() == 1
}
fn is_done(&self) -> bool {
self.input[self.next_index..].is_empty()
}
/// <https://urlpattern.spec.whatwg.org/#get-the-next-code-point>
fn get_the_next_code_point(&mut self) {
// Step 1. Set tokenizer's code point to the Unicode code point in tokenizer's
// input at the position indicated by tokenizer's next index.
self.code_point = self.input[self.next_index..]
.chars()
.next()
.expect("URLPattern tokenizer is trying to read out of bounds");
// Step 2. Increment tokenizer's next index by 1.
// NOTE: Because our next_index is indexing bytes (not code points) we use
// the UTF-8 length of the code point instead.
self.next_index = self.next_index.wrapping_add(self.code_point.len_utf8());
}
/// <https://urlpattern.spec.whatwg.org/#seek-and-get-the-next-code-point>
fn seek_and_get_the_next_code_point(&mut self, index: usize) {
// Step 1. Set tokenizer's next index to index.
self.next_index = index;
// Step 2. Run get the next code point given tokenizer.
self.get_the_next_code_point();
}
/// <https://urlpattern.spec.whatwg.org/#add-a-token>
fn add_a_token(
&mut self,
token_type: TokenType,
next_position: usize,
value_position: usize,
value_length: usize,
) {
// Step 1. Let token be a new token.
// Step 2. Set token's type to type.
// Step 3. Set token's index to tokenizer's index.
// Step 4. Set token's value to the code point substring from value position
// with length value length within tokenizer's input.
let token = Token {
token_type,
index: self.index,
value: &self.input[value_position..][..value_length],
};
// Step 5. Append token to the back of tokenizer's token list.
self.token_list.push(token);
// Step 6. Set tokenizer's index to next position.
self.index = next_position;
}
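// Example (informative): for the input "/:id", the "name" token is added with
// value position 2 and value length 2, so its value is "id", its index is 1
// (the position of ":"), and the main loop resumes at next position 4.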
/// <https://urlpattern.spec.whatwg.org/#add-a-token-with-default-position-and-length>
fn add_a_token_with_default_position_and_length(&mut self, token_type: TokenType) {
// Step 1. Run add a token with default length given tokenizer, type,
// tokenizer's next index, and tokenizer's index.
self.add_a_token_with_default_length(token_type, self.next_index, self.index);
}
/// <https://urlpattern.spec.whatwg.org/#add-a-token-with-default-length>
fn add_a_token_with_default_length(
&mut self,
token_type: TokenType,
next_position: usize,
value_position: usize,
) {
// Step 1. Let computed length be next position - value position.
let computed_length = next_position - value_position;
// Step 2. Run add a token given tokenizer, type, next position, value position, and computed length.
self.add_a_token(token_type, next_position, value_position, computed_length);
}
/// <https://urlpattern.spec.whatwg.org/#process-a-tokenizing-error>
fn process_a_tokenizing_error(
&mut self,
next_position: usize,
value_position: usize,
) -> Fallible<()> {
// Step 1. If tokenizer's policy is "strict", then throw a TypeError.
if self.policy == TokenizePolicy::Strict {
return Err(Error::Type("Failed to tokenize URL pattern".into()));
}
// Step 2. Assert: tokenizer's policy is "lenient".
debug_assert_eq!(self.policy, TokenizePolicy::Lenient);
// Step 3. Run add a token with default length given tokenizer, "invalid-char",
// next position, and value position.
self.add_a_token_with_default_length(TokenType::InvalidChar, next_position, value_position);
Ok(())
}
}
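// Another illustrative sketch under the same assumptions as the test module
// above (hypothetical, not part of this change): the "lenient" policy turns
// what "strict" reports as a TypeError into an "invalid-char" token, which is
// what constructor-string parsing will later rely on.
#[cfg(test)]
mod lenient_sketch {
    use super::{tokenize, TokenType, TokenizePolicy};

    #[test]
    fn lenient_policy_degrades_errors() {
        // An unterminated group is a tokenizing error under "strict"...
        assert!(tokenize("(", TokenizePolicy::Strict).is_err());
        // ...but under "lenient" it merely yields an "invalid-char" token.
        let tokens = tokenize("(", TokenizePolicy::Lenient).unwrap();
        assert_eq!(tokens[0].token_type, TokenType::InvalidChar);
        assert_eq!(tokens[0].value, "(");
        assert_eq!(tokens[1].token_type, TokenType::End);
    }
}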
/// <https://urlpattern.spec.whatwg.org/#process-a-base-url-string>
fn process_a_base_url_string(input: &str, init_type: PatternInitType) -> String {
// Step 1. Assert: input is not null.


@@ -344,27 +344,6 @@
[Pattern: [{"pathname":"/foo{/bar}*"}\] Inputs: [{"pathname":"/foo/"}\]]
expected: FAIL
[Pattern: [{"protocol":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"username":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"password":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hostname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"search":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hash":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"protocol":":café"}\] Inputs: [{"protocol":"foo"}\]]
expected: FAIL
@@ -470,18 +449,12 @@
[Pattern: [{"pathname":"\\ud83d \\udeb2"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":":\\ud83d \\udeb2"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":":a󠄀b"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":"test/:a𐑐b"}\] Inputs: [{"pathname":"test/foo"}\]]
expected: FAIL
[Pattern: [{"pathname":":🚲"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"port":""}\] Inputs: [{"protocol":"http","port":"80"}\]]
expected: FAIL
@@ -1366,27 +1339,6 @@
[Pattern: [{"pathname":"/foo{/bar}*"}\] Inputs: [{"pathname":"/foo/"}\]]
expected: FAIL
[Pattern: [{"protocol":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"username":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"password":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hostname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"search":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hash":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"protocol":":café"}\] Inputs: [{"protocol":"foo"}\]]
expected: FAIL
@@ -1492,18 +1444,12 @@
[Pattern: [{"pathname":"\\ud83d \\udeb2"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":":\\ud83d \\udeb2"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":":a󠄀b"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":"test/:a𐑐b"}\] Inputs: [{"pathname":"test/foo"}\]]
expected: FAIL
[Pattern: [{"pathname":":🚲"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"port":""}\] Inputs: [{"protocol":"http","port":"80"}\]]
expected: FAIL


@@ -347,27 +347,6 @@
[Pattern: [{"pathname":"/foo{/bar}*"}\] Inputs: [{"pathname":"/foo/"}\]]
expected: FAIL
[Pattern: [{"protocol":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"username":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"password":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hostname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"search":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hash":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"protocol":":café"}\] Inputs: [{"protocol":"foo"}\]]
expected: FAIL
@@ -473,18 +452,12 @@
[Pattern: [{"pathname":"\\ud83d \\udeb2"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":":\\ud83d \\udeb2"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":":a󠄀b"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":"test/:a𐑐b"}\] Inputs: [{"pathname":"test/foo"}\]]
expected: FAIL
[Pattern: [{"pathname":":🚲"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"port":""}\] Inputs: [{"protocol":"http","port":"80"}\]]
expected: FAIL
@@ -1366,27 +1339,6 @@
[Pattern: [{"pathname":"/foo{/bar}*"}\] Inputs: [{"pathname":"/foo/"}\]]
expected: FAIL
[Pattern: [{"protocol":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"username":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"password":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hostname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"search":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"hash":"(café)"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"protocol":":café"}\] Inputs: [{"protocol":"foo"}\]]
expected: FAIL
@@ -1492,18 +1444,12 @@
[Pattern: [{"pathname":"\\ud83d \\udeb2"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":":\\ud83d \\udeb2"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"pathname":":a󠄀b"}\] Inputs: [\]]
expected: FAIL
[Pattern: [{"pathname":"test/:a𐑐b"}\] Inputs: [{"pathname":"test/foo"}\]]
expected: FAIL
[Pattern: [{"pathname":":🚲"}\] Inputs: undefined]
expected: FAIL
[Pattern: [{"port":""}\] Inputs: [{"protocol":"http","port":"80"}\]]
expected: FAIL