From ef961580eb8a4b48ee22141b8e549f18e5b0ca20 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?D=C3=A1vid=20Komorowicz?=
Date: Tue, 19 Mar 2024 17:36:03 +0100
Subject: [PATCH] Update md files

---
 .../multi_reference/deepremaster/index.md     | 19 +++++++++++++++----
 .../single_reference/deepremaster/index.md    | 15 +++++++++++----
 benchmark/single_reference/pdnla_net/index.md | 11 ++++++++++-
 benchmark/single_reference/unicolor/index.md  | 14 ++++++++++++--
 benchmark/unconditional/unicolor/index.md     | 13 +++++++++++--
 5 files changed, 59 insertions(+), 13 deletions(-)

diff --git a/benchmark/multi_reference/deepremaster/index.md b/benchmark/multi_reference/deepremaster/index.md
index 7495939..1282d79 100644
--- a/benchmark/multi_reference/deepremaster/index.md
+++ b/benchmark/multi_reference/deepremaster/index.md
@@ -3,19 +3,24 @@ title: 'Deepremaster: Multi reference'
 layout: default
 tag: deepremaster
 category: multi_reference
+last_modified_at: '2024-03-19 16:00:27.259599'
 ---
 # Multi reference
 ## Deepremaster
 This model is originally designed for film colorization.
-
 To run this benchmark the input image is duplicated 5 times.
-
 The reference images are supposed to be colored frames chosen from the movies.
 This means that significant differences in the reference images cannot be used, as illustrated below.
-Another interesting finding is that the temporal convolution, responsible for homigenizing the color between conscutive frames, learned to color the sky and trees without reference.
+
+An interesting finding is that certain objects are colored even when they don't appear in the reference images, as long as those colors are present in the reference images.
+This suggests that, instead of a semantic-to-semantic matching between the gray and reference images, a semantic-to-color correspondence is learned (at least partially).
+For example, the sky is colored blue and the leaves green.
+The semantic matching takes place in feature space, where the spatial information is degraded.
+See the noise test vs. the gray test.
+
 
 | Task | Image #1 | Image #2 | Image #3 | Reference |
 | ----- | ----- | ----- | ----- | ----- |
@@ -26,10 +31,16 @@ Another interesting finding is that the temporal convolution, responsible for ho
 | Semantic correspondence weak | | | | |
 | Distractors | | | | |
 
+### Additional Information
+
+- Last updated: {{ "2024-03-19 16:01:17" | date: site.minima.date_format }}
+- Paper: [https://github.com/satoshiiizuka/siggraphasia2019_remastering](https://github.com/satoshiiizuka/siggraphasia2019_remastering)
+
+
 ### Other categories:
 
 {% for p in site.pages %}
 {% if p.tag == "deepremaster" and p.url != page.url %}
 - [{{ p.title }}]({{ p.url | relative_url }})
 {% endif %}
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/benchmark/single_reference/deepremaster/index.md b/benchmark/single_reference/deepremaster/index.md
index cdeddb7..6261337 100644
--- a/benchmark/single_reference/deepremaster/index.md
+++ b/benchmark/single_reference/deepremaster/index.md
@@ -3,19 +3,18 @@ title: 'Deepremaster: Single reference'
 layout: default
 tag: deepremaster
 category: single_reference
+last_modified_at: '2024-03-19 15:58:21.484082'
 ---
 # Single reference
 ## Deepremaster
 This model is originally designed for film colorization.
-
 To run this benchmark the input image is duplicated 5 times.
-
 The reference images are supposed to be colored frames chosen from the movies.
 This means that significant differences in the reference images cannot be used, as illustrated below.
+(compare the 'Recolor source' row with the other rows)
 
-Another interesting finding is that the temporal convolution, responsible for homigenizing the color between conscutive frames, learned to color the sky and trees without reference.
 
 | Task | Image #1 | Image #2 | Image #3 | Reference |
 | ----- | ----- | ----- | ----- | ----- |
@@ -33,6 +32,14 @@ Another interesting finding is that the temporal convolution, responsible for ho
 | Distractors | | | | |
 | Random noise | | | | |
 | Random noise | | | | |
+| Gray | | | | |
+| Gray | | | | |
+
+### Additional Information
+
+- Last updated: {{ "2024-03-19 16:00:27" | date: site.minima.date_format }}
+- Paper: [https://github.com/satoshiiizuka/siggraphasia2019_remastering](https://github.com/satoshiiizuka/siggraphasia2019_remastering)
+
 
 ### Other categories:
 
@@ -40,4 +47,4 @@ Another interesting finding is that the temporal convolution, responsible for ho
 {% if p.tag == "deepremaster" and p.url != page.url %}
 - [{{ p.title }}]({{ p.url | relative_url }})
 {% endif %}
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/benchmark/single_reference/pdnla_net/index.md b/benchmark/single_reference/pdnla_net/index.md
index 800dffd..46f0c38 100644
--- a/benchmark/single_reference/pdnla_net/index.md
+++ b/benchmark/single_reference/pdnla_net/index.md
@@ -3,6 +3,7 @@ title: 'Pdnla net: Single reference'
 layout: default
 tag: pdnla_net
 category: single_reference
+last_modified_at: '2024-03-19 16:01:18.739274'
 ---
 # Single reference
 ## Pdnla net
@@ -25,6 +26,14 @@ category: single_reference
 | Distractors | | | | |
 | Random noise | | | | |
 | Random noise | | | | |
+| Gray | | | | |
+| Gray | | | | |
+
+### Additional Information
+
+- Last updated: {{ "2024-03-19 16:04:12" | date: site.minima.date_format }}
+- Paper: [https://ieeexplore.ieee.org/abstract/document/10183846](https://ieeexplore.ieee.org/abstract/document/10183846)
+
 
 ### Other categories:
 
@@ -32,4 +41,4 @@ category: single_reference
 {% if p.tag == "pdnla_net" and p.url != page.url %}
 - [{{ p.title }}]({{ p.url | relative_url }})
 {% endif %}
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/benchmark/single_reference/unicolor/index.md b/benchmark/single_reference/unicolor/index.md
index 397d925..6db86c0 100644
--- a/benchmark/single_reference/unicolor/index.md
+++ b/benchmark/single_reference/unicolor/index.md
@@ -3,11 +3,13 @@ title: 'Unicolor: Single reference'
 layout: default
 tag: unicolor
 category: single_reference
+last_modified_at: '2024-03-19 16:04:20.252565'
 ---
 # Single reference
 ## Unicolor
-This model generate diverse results where the color is not constrained by the reference image.
+This model generates diverse results where the color is not constrained by the reference image.
+
 
 | Task | Image #1 | Image #2 | Image #3 | Reference |
 | ----- | ----- | ----- | ----- | ----- |
@@ -25,6 +27,14 @@ This model generate diverse results where the color is not constrained by the re
 | Distractors | | | | |
 | Random noise | | | | |
 | Random noise | | | | |
+| Gray | | | | |
+| Gray | | | | |
+
+### Additional Information
+
+- Last updated: {{ "2024-03-19 16:26:06" | date: site.minima.date_format }}
+- Paper: [https://arxiv.org/abs/2209.11223](https://arxiv.org/abs/2209.11223)
+
 
 ### Other categories:
 
@@ -32,4 +42,4 @@ This model generate diverse results where the color is not constrained by the re
 {% if p.tag == "unicolor" and p.url != page.url %}
 - [{{ p.title }}]({{ p.url | relative_url }})
 {% endif %}
-{% endfor %}
\ No newline at end of file
+{% endfor %}
diff --git a/benchmark/unconditional/unicolor/index.md b/benchmark/unconditional/unicolor/index.md
index 025a0c3..ae1bd78 100644
--- a/benchmark/unconditional/unicolor/index.md
+++ b/benchmark/unconditional/unicolor/index.md
@@ -3,11 +3,14 @@ title: 'Unicolor: Unconditional'
 layout: default
 tag: unicolor
 category: unconditional
+date: '2024-03-19 15:41:57.893350'
+last_modified_at: '2024-03-19 15:41:57.893356'
 ---
 # Unconditional
 ## Unicolor
-This model generate diverse results where the color is not constrained by the reference image.
+This model generates diverse results where the color is not constrained by the reference image.
+
 
 | Image #1 | Image #2 | Image #3 | Image #4 | Image #5 |
 | ----- | ----- | ----- | ----- | ----- |
@@ -31,10 +34,16 @@ This model generate diverse results where the color is not constrained by the re
 | | | | | |
 | | | | | |
 
+### Additional Information
+
+- Last updated: {{ "2024-03-19 15:41:57" | date: site.minima.date_format }}
+- Paper: [https://arxiv.org/abs/2209.11223](https://arxiv.org/abs/2209.11223)
+
+
 ### Other categories:
 
 {% for p in site.pages %}
 {% if p.tag == "unicolor" and p.url != page.url %}
 - [{{ p.title }}]({{ p.url | relative_url }})
 {% endif %}
-{% endfor %}
\ No newline at end of file
+{% endfor %}