Prevent flaky tests (#514)

Move from example.org to example.com, which seems to be more permissive for testing
This commit is contained in:
Matthias 2022-02-18 10:29:49 +01:00 committed by GitHub
parent 6d56c6b55c
commit 812663d832
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
29 changed files with 147 additions and 144 deletions

View file

@@ -15,7 +15,7 @@ jobs:
- name: Link Checker
uses: lycheeverse/lychee-action@master
with:
args: --verbose --no-progress --exclude example.org -- README.md
args: --verbose --no-progress --exclude example.com -- README.md
- name: Create Issue From File
if: github.repository_owner == 'lycheeverse'

View file

@@ -227,7 +227,7 @@ FLAGS:
OPTIONS:
-a, --accept <accept> Comma-separated list of accepted status codes for valid links
-b, --base <base> Base URL or website root directory to check relative URLs e.g.
https://example.org or `/path/to/public`
https://example.com or `/path/to/public`
--basic-auth <basic-auth> Basic authentication support. E.g. `username:password`
-c, --config <config-file> Configuration file to use [default: ./lychee.toml]
--exclude <exclude>... Exclude URLs from checking (supports regex)
@@ -254,7 +254,7 @@ OPTIONS:
ARGS:
<inputs>... The inputs (where to get links to check from). These can be: files (e.g. `README.md`), file globs
(e.g. `"~/git/*/README.md"`), remote URLs (e.g. `https://example.org/README.md`) or standard
(e.g. `"~/git/*/README.md"`), remote URLs (e.g. `https://example.com/README.md`) or standard
input (`-`). NOTE: Use `--` to separate inputs from options that allow multiple arguments
```

View file

@@ -11,7 +11,7 @@ async fn main() -> Result<()> {
// Excludes
let excludes = Some(RegexSet::new(&[r"example"]).unwrap());
// Includes take precedence over excludes
let includes = Some(RegexSet::new(&[r"example.org"]).unwrap());
let includes = Some(RegexSet::new(&[r"example.com"]).unwrap());
// Set custom request headers
let mut headers = HeaderMap::new();
@@ -39,7 +39,7 @@ async fn main() -> Result<()> {
.build()
.client()?;
let response = client.check("https://example.org").await?;
let response = client.check("https://example.com").await?;
dbg!(&response);
assert!(response.status().is_success());
Ok(())

View file

@@ -13,7 +13,7 @@ async fn main() -> Result<()> {
let (send_resp, mut recv_resp) = mpsc::channel(CONCURRENT_REQUESTS);
// Add as many requests as you like
let requests = vec![Request::try_from("https://example.org")?];
let requests = vec![Request::try_from("https://example.com")?];
// Queue requests
tokio::spawn(async move {

8
fixtures/TEST.md vendored
View file

@@ -16,8 +16,8 @@ Some more complex formatting to test that Markdown parsing works.
[![CC0](https://i.creativecommons.org/p/zero/1.0/88x31.png)](https://creativecommons.org/publicdomain/zero/1.0/)
Test HTTP and HTTPS for the same site.
http://example.org
https://example.org
http://example.com
https://example.com
test@example.org
mailto:test2@example.org
test@example.com
mailto:test2@example.com

View file

@@ -1,6 +1,6 @@
https://endler.dev
test@example.org
test@example.com
foo@bar.dev
https://example.org
https://example.com
octocat+github@github.com
mailto:test2@example.org
mailto:test2@example.com

View file

@@ -1,23 +1,27 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<link rel="home" href="https://example.org/head/home">
<title>Test</title>
<meta name="description" content="Test HTML5 parsing (not valid XML)">
<head>
<meta charset="utf-8" />
<link rel="home" href="https://example.com/head/home" />
<title>Test</title>
<meta name="description" content="Test HTML5 parsing (not valid XML)" />
<!-- The links below have no closing tags (not valid XML) -->
<link rel="icon" type="image/png" sizes="32x32" href="images/icon.png">
<link rel="stylesheet" type="text/css" href="https://example.org/css/style_full_url.css">
<link rel="stylesheet" type="text/css" href="css/style_relative_url.css">
<!-- The links below have no closing tags (not valid XML) -->
<link rel="icon" type="image/png" sizes="32x32" href="images/icon.png" />
<link
rel="stylesheet"
type="text/css"
href="https://example.com/css/style_full_url.css"
/>
<link rel="stylesheet" type="text/css" href="css/style_relative_url.css" />
<!-- The defer attribute has no value (not valid XML) -->
<script defer src="js/script.js"></script>
</head>
<body>
Hello world.
<a href="https://example.org/body/a">Link in body</a>
<!-- Empty a tag might be problematic (in terms of browser support), but should still be parsed -->
<div><a href="https://example.org/body/div_empty_a"/></div>
</body>
<!-- The defer attribute has no value (not valid XML) -->
<script defer src="js/script.js"></script>
</head>
<body>
Hello world.
<a href="https://example.com/body/a">Link in body</a>
<!-- Empty a tag might be problematic (in terms of browser support), but should still be parsed -->
<div><a href="https://example.com/body/div_empty_a" /></div>
</body>
</html>

View file

@@ -1,10 +1,15 @@
<!doctype html>
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<some-weird-element href="https://example.org/some-weird-element"></some-weird-element>
<even-weirder fake-attr src="https://example.org/even-weirder-src" href="https://example.org/even-weirder-href"></even-weirder>
<citations cite="https://example.org/citations"></citations>
</body>
<head> </head>
<body>
<some-weird-element
href="https://example.com/some-weird-element"
></some-weird-element>
<even-weirder
fake-attr
src="https://example.com/even-weirder-src"
href="https://example.com/even-weirder-href"
></even-weirder>
<citations cite="https://example.com/citations"></citations>
</body>
</html>

View file

@@ -1,8 +1,7 @@
<!doctype html>
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<a href="https://example.org/body/a">Link in body</a>
</body>
<head> </head>
<body>
<a href="https://example.com/body/a">Link in body</a>
</body>
</html>

View file

@@ -1,10 +1,9 @@
<!doctype html>
<!DOCTYPE html>
<html lang="en">
<head>
</head>
<body>
<a href="https;//example.org/malformed_one">Malformed link</a>
<a href="https://example]org/malformed_two">Malformed link</a>
<a href="https://example.org/valid">Valid link</a>
</body>
<head> </head>
<body>
<a href="https;//example.com/malformed_one">Malformed link</a>
<a href="https://example]org/malformed_two">Malformed link</a>
<a href="https://example.com/valid">Valid link</a>
</body>
</html>

View file

@@ -1 +1 @@
<!DOCTYPE html><html class=no-js lang=en><head><link href=https://example.org/ rel=canonical><link href=https://example.org/favicon.ico rel="shortcut icon"><link crossorigin="" href=https://fonts.externalsite.com rel=preconnect><body><div></div><header><nav><a href=https://example.org/docs/ title=Docs></a><div><a href=https://example.org/ title=Home></a></div></nav></header><div><nav><div><ul><li><a href=https://example.org/forum>Forum</a></ul></div></nav></div>
<!DOCTYPE html><html class=no-js lang=en><head><link href=https://example.com/ rel=canonical><link href=https://example.com/favicon.ico rel="shortcut icon"><link crossorigin="" href=https://fonts.externalsite.com rel=preconnect><body><div></div><header><nav><a href=https://example.com/docs/ title=Docs></a><div><a href=https://example.com/ title=Home></a></div></nav></header><div><nav><div><ul><li><a href=https://example.com/forum>Forum</a></ul></div></nav></div>

View file

@@ -1 +1 @@
<a href="http://example.org">Insecure HTTP link</a>
<a href="http://example.com">Insecure HTTP link</a>

View file

@@ -1,5 +1,5 @@
These links are all the same and match those of TEST_REPETITION_2.txt.
All links in both files should be counted as one and checked once only.
https://example.org/
https://example.org/
https://example.org
https://example.com/
https://example.com/
https://example.com

View file

@@ -1,5 +1,5 @@
These links are all the same and match those of TEST_REPETITION_1.txt.
All links in both files should be counted as one and checked once only.
https://example.org/
https://example.org/
https://example.org
https://example.com/
https://example.com/
https://example.com

View file

@@ -1,3 +1,3 @@
https://example.org
http://example.org
https://example.com
http://example.com
slack://channel?id=123

View file

@@ -1,3 +1,3 @@
slack://channel?id=123
file:///test_folder/test_file
https://example.org
https://example.com

View file

@@ -1,10 +1,7 @@
Test HTTP and HTTPS for the same site.
https://example.org
https://example.com
https://github.com/rust-lang/rust/
https://foo.example.com
https://example.org/bar
https://example.com/bar
http://wikipedia.org
https://github.com/lycheeverse/lychee
file:///path/to/file
mail@example.org
mail@example.com

View file

@@ -1 +1 @@
example.com
https://example.com/bar

View file

@@ -7,7 +7,7 @@
<p>
<ul>
<li>
<a href="https://example.org">example</a>
<a href="https://example.com">example</a>
</li>
<li>
<a href="/">home</a>

View file

@@ -108,7 +108,7 @@ fn parse_base(src: &str) -> Result<Base, lychee_lib::ErrorKind> {
pub(crate) struct LycheeOptions {
/// The inputs (where to get links to check from).
/// These can be: files (e.g. `README.md`), file globs (e.g. `"~/git/*/README.md"`),
/// remote URLs (e.g. `https://example.org/README.md`) or standard input (`-`).
/// remote URLs (e.g. `https://example.com/README.md`) or standard input (`-`).
/// NOTE: Use `--` to separate inputs from options that allow multiple arguments.
#[structopt(name = "inputs", required = true)]
raw_inputs: Vec<String>,
@@ -272,7 +272,7 @@ pub(crate) struct Config {
pub(crate) method: String,
/// Base URL or website root directory to check relative URLs
/// e.g. https://example.org or `/path/to/public`
/// e.g. https://example.com or `/path/to/public`
#[structopt(short, long, parse(try_from_str = parse_base))]
#[serde(default)]
pub(crate) base: Option<Base>,

View file

@@ -136,7 +136,7 @@ mod test {
stats.add(Response(
InputSource::Stdin,
ResponseBody {
uri: website("https://example.org/ok"),
uri: website("https://example.com/ok"),
status: Status::Ok(StatusCode::OK),
},
));

View file

@@ -570,9 +570,8 @@ mod cli {
cmd.current_dir(test_path)
.arg("TEST.md")
.assert()
.success()
.stdout(contains("9 Total"))
.stdout(contains("7 Excluded"));
.stdout(contains("7 Total"))
.stdout(contains("5 Excluded"));
Ok(())
}
@@ -589,8 +588,8 @@ mod cli {
.arg(excludes_path)
.assert()
.success()
.stdout(contains("9 Total"))
.stdout(contains("8 Excluded"));
.stdout(contains("7 Total"))
.stdout(contains("6 Excluded"));
Ok(())
}

View file

@@ -611,7 +611,7 @@ mod test {
.client()
.unwrap();
assert!(!client.is_excluded(&Uri {
url: "mailto://mail@example.org".try_into().unwrap()
url: "mailto://mail@example.com".try_into().unwrap()
}));
let client = ClientBuilder::builder()
@@ -621,14 +621,14 @@ mod test {
.client()
.unwrap();
assert!(client.is_excluded(&Uri {
url: "mailto://mail@example.org".try_into().unwrap()
url: "mailto://mail@example.com".try_into().unwrap()
}));
}
#[tokio::test]
async fn test_require_https() {
let client = ClientBuilder::builder().build().client().unwrap();
let res = client.check("http://example.org").await.unwrap();
let res = client.check("http://example.com").await.unwrap();
assert!(res.status().is_success());
// Same request will fail if HTTPS is required
@@ -637,7 +637,7 @@ mod test {
.build()
.client()
.unwrap();
let res = client.check("http://example.org").await.unwrap();
let res = client.check("http://example.com").await.unwrap();
assert!(res.status().is_failure());
}

View file

@@ -120,7 +120,7 @@ mod test {
#[tokio::test]
async fn test_url_without_extension_is_html() -> Result<()> {
let input = Input::new("https://example.org/", None, true);
let input = Input::new("https://example.com/", None, true);
let contents: Vec<_> = input.get_contents(true).await.collect::<Vec<_>>().await;
assert_eq!(contents.len(), 1);
@@ -286,7 +286,7 @@ mod test {
#[tokio::test]
async fn test_extract_html5_not_valid_xml_relative_links() {
let base = Base::try_from("https://example.org").unwrap();
let base = Base::try_from("https://example.com").unwrap();
let input = load_fixture("TEST_HTML5.html");
let input = Input {
@@ -297,13 +297,13 @@ mod test {
let expected_links = HashSet::from_iter([
// the body links wouldn't be present if the file was parsed strictly as XML
website("https://example.org/body/a"),
website("https://example.org/body/div_empty_a"),
website("https://example.org/css/style_full_url.css"),
website("https://example.org/css/style_relative_url.css"),
website("https://example.org/head/home"),
website("https://example.org/images/icon.png"),
website("https://example.org/js/script.js"),
website("https://example.com/body/a"),
website("https://example.com/body/div_empty_a"),
website("https://example.com/css/style_full_url.css"),
website("https://example.com/css/style_relative_url.css"),
website("https://example.com/head/home"),
website("https://example.com/images/icon.png"),
website("https://example.com/js/script.js"),
]);
assert_eq!(links, expected_links);

View file

@@ -124,13 +124,13 @@ mod test {
#[test]
fn test_non_markdown_links() {
let input =
"https://endler.dev and https://hello-rust.show/foo/bar?lol=1 at test@example.org";
"https://endler.dev and https://hello-rust.show/foo/bar?lol=1 at test@example.com";
let links: HashSet<Uri> = extract_uris(input, FileType::Plaintext);
let expected = IntoIterator::into_iter([
website("https://endler.dev"),
website("https://hello-rust.show/foo/bar?lol=1"),
mail("test@example.org"),
mail("test@example.com"),
])
.collect::<HashSet<Uri>>();
@@ -152,11 +152,11 @@
let links = extract_uris(&input, FileType::Html);
let expected_links = IntoIterator::into_iter([
website("https://example.org/head/home"),
website("https://example.org/css/style_full_url.css"),
website("https://example.com/head/home"),
website("https://example.com/css/style_full_url.css"),
// the body links wouldn't be present if the file was parsed strictly as XML
website("https://example.org/body/a"),
website("https://example.org/body/div_empty_a"),
website("https://example.com/body/a"),
website("https://example.com/body/div_empty_a"),
])
.collect::<HashSet<Uri>>();
@@ -166,7 +166,7 @@ mod test {
#[test]
fn test_extract_relative_url() {
let source = InputSource::RemoteUrl(Box::new(
Url::parse("https://example.org/some-post").unwrap(),
Url::parse("https://example.com/some-post").unwrap(),
));
let contents = r#"<html>
@@ -210,7 +210,7 @@ mod test {
let input = load_fixture("TEST_HTML5_LOWERCASE_DOCTYPE.html");
let links = extract_uris(&input, FileType::Html);
let expected_links = IntoIterator::into_iter([website("https://example.org/body/a")])
let expected_links = IntoIterator::into_iter([website("https://example.com/body/a")])
.collect::<HashSet<Uri>>();
assert_eq!(links, expected_links);
@@ -223,11 +223,11 @@ mod test {
let links = extract_uris(&input, FileType::Html);
let expected_links = IntoIterator::into_iter([
website("https://example.org/"),
website("https://example.org/favicon.ico"),
website("https://example.com/"),
website("https://example.com/favicon.ico"),
website("https://fonts.externalsite.com"),
website("https://example.org/docs/"),
website("https://example.org/forum"),
website("https://example.com/docs/"),
website("https://example.com/forum"),
])
.collect::<HashSet<Uri>>();
@@ -240,7 +240,7 @@ mod test {
let input = load_fixture("TEST_HTML5_MALFORMED_LINKS.html");
let links = extract_uris(&input, FileType::Html);
let expected_links = IntoIterator::into_iter([website("https://example.org/valid")])
let expected_links = IntoIterator::into_iter([website("https://example.com/valid")])
.collect::<HashSet<Uri>>();
assert_eq!(links, expected_links);
@@ -253,10 +253,10 @@ mod test {
let links = extract_uris(&input, FileType::Html);
let expected_links = IntoIterator::into_iter([
website("https://example.org/some-weird-element"),
website("https://example.org/even-weirder-src"),
website("https://example.org/even-weirder-href"),
website("https://example.org/citations"),
website("https://example.com/some-weird-element"),
website("https://example.com/even-weirder-src"),
website("https://example.com/even-weirder-href"),
website("https://example.com/citations"),
])
.collect::<HashSet<Uri>>();

View file

@@ -242,7 +242,7 @@ mod test {
// In this case, only the requests matching the include set will be checked
let filter = Filter::default();
assert!(!filter.is_excluded(&website("https://example.org")));
assert!(!filter.is_excluded(&website("https://example.com")));
}
#[test]
@@ -253,7 +253,7 @@ mod test {
assert!(filter.is_excluded(&website(
"http://schemas.openxmlformats.org/markup-compatibility/2006"
)));
assert!(!filter.is_excluded(&website("https://example.org")));
assert!(!filter.is_excluded(&website("https://example.com")));
}
#[test]
@@ -271,7 +271,7 @@ mod test {
#[test]
fn test_include_regex() {
let includes = Includes {
regex: RegexSet::new(&[r"foo.example.org"]).unwrap(),
regex: RegexSet::new(&[r"foo.example.com"]).unwrap(),
};
let filter = Filter {
includes: Some(includes),
@@ -279,9 +279,9 @@
};
// Only the requests matching the include set will be checked
assert!(!filter.is_excluded(&website("https://foo.example.org")));
assert!(filter.is_excluded(&website("https://bar.example.org")));
assert!(filter.is_excluded(&website("https://example.org")));
assert!(!filter.is_excluded(&website("https://foo.example.com")));
assert!(filter.is_excluded(&website("https://bar.example.com")));
assert!(filter.is_excluded(&website("https://example.com")));
}
#[test]
@@ -291,7 +291,7 @@ mod test {
..Filter::default()
};
assert!(filter.is_excluded(&mail("mail@example.org")));
assert!(filter.is_excluded(&mail("mail@example.com")));
assert!(filter.is_excluded(&mail("foo@bar.dev")));
assert!(!filter.is_excluded(&website("http://bar.dev")));
}
@@ -299,7 +299,7 @@ mod test {
#[test]
fn test_exclude_regex() {
let excludes = Excludes {
regex: RegexSet::new(&[r"github.com", r"[a-z]+\.(org|net)", r"@example.org"]).unwrap(),
regex: RegexSet::new(&[r"github.com", r"[a-z]+\.(org|net)", r"@example.com"]).unwrap(),
};
let filter = Filter {
excludes: Some(excludes),
@@ -308,7 +308,7 @@ mod test {
assert!(filter.is_excluded(&website("https://github.com")));
assert!(filter.is_excluded(&website("http://exclude.org")));
assert!(filter.is_excluded(&mail("mail@example.org")));
assert!(filter.is_excluded(&mail("mail@example.com")));
assert!(!filter.is_excluded(&website("http://bar.dev")));
assert!(!filter.is_excluded(&mail("foo@bar.dev")));
@@ -316,10 +316,10 @@ mod test {
#[test]
fn test_exclude_include_regex() {
let includes = Includes {
regex: RegexSet::new(&[r"foo.example.org"]).unwrap(),
regex: RegexSet::new(&[r"foo.example.com"]).unwrap(),
};
let excludes = Excludes {
regex: RegexSet::new(&[r"example.org"]).unwrap(),
regex: RegexSet::new(&[r"example.com"]).unwrap(),
};
let filter = Filter {
includes: Some(includes),
@@ -328,10 +328,10 @@ mod test {
};
// Includes take preference over excludes
assert!(!filter.is_excluded(&website("https://foo.example.org")),);
assert!(!filter.is_excluded(&website("https://foo.example.com")),);
assert!(filter.is_excluded(&website("https://example.org")));
assert!(filter.is_excluded(&website("https://bar.example.org")));
assert!(filter.is_excluded(&website("https://example.com")));
assert!(filter.is_excluded(&website("https://bar.example.com")));
}
#[test]

View file

@@ -43,8 +43,8 @@ mod test_fs_tree {
"/index.html"
);
assert_eq!(
remove_get_params_and_fragment("https://example.org/index.html?foo=bar"),
"https://example.org/index.html"
remove_get_params_and_fragment("https://example.com/index.html?foo=bar"),
"https://example.com/index.html"
);
assert_eq!(
remove_get_params_and_fragment("test.png?foo=bar"),
@@ -52,12 +52,12 @@
);
assert_eq!(
remove_get_params_and_fragment("https://example.org/index.html#anchor"),
"https://example.org/index.html"
remove_get_params_and_fragment("https://example.com/index.html#anchor"),
"https://example.com/index.html"
);
assert_eq!(
remove_get_params_and_fragment("https://example.org/index.html?foo=bar#anchor"),
"https://example.org/index.html"
remove_get_params_and_fragment("https://example.com/index.html?foo=bar#anchor"),
"https://example.com/index.html"
);
assert_eq!(
remove_get_params_and_fragment("test.png?foo=bar#anchor"),

View file

@@ -102,13 +102,13 @@ mod test_base {
#[test]
fn test_get_base_from_url() {
for (url, expected) in [
("https://example.org", "https://example.org"),
("https://example.org?query=something", "https://example.org"),
("https://example.org/#anchor", "https://example.org"),
("https://example.org/foo/bar", "https://example.org"),
("https://example.com", "https://example.com"),
("https://example.com?query=something", "https://example.com"),
("https://example.com/#anchor", "https://example.com"),
("https://example.com/foo/bar", "https://example.com"),
(
"https://example.org:1234/foo/bar",
"https://example.org:1234",
"https://example.com:1234/foo/bar",
"https://example.com:1234",
),
] {
let url = Url::parse(url).unwrap();

View file

@@ -97,7 +97,7 @@ impl Uri {
#[inline]
#[must_use]
/// Returns the domain of the URI (e.g. `example.org`)
/// Returns the domain of the URI (e.g. `example.com`)
pub fn domain(&self) -> Option<&str> {
self.url.domain()
}
@@ -278,20 +278,20 @@ mod test {
fn test_uri_from_str() {
assert!(Uri::try_from("").is_err());
assert_eq!(
Uri::try_from("https://example.org"),
Ok(website("https://example.org"))
Uri::try_from("https://example.com"),
Ok(website("https://example.com"))
);
assert_eq!(
Uri::try_from("https://example.org/@test/testing"),
Ok(website("https://example.org/@test/testing"))
Uri::try_from("https://example.com/@test/testing"),
Ok(website("https://example.com/@test/testing"))
);
assert_eq!(
Uri::try_from("mail@example.org"),
Ok(mail("mail@example.org"))
Uri::try_from("mail@example.com"),
Ok(mail("mail@example.com"))
);
assert_eq!(
Uri::try_from("mailto:mail@example.org"),
Ok(mail("mail@example.org"))
Uri::try_from("mailto:mail@example.com"),
Ok(mail("mail@example.com"))
);
}