
Change gaussian kernel to anisotropic kernel. (#199)
lllyasviel committed Aug 18, 2023
1 parent cb5c4b1 commit 4f0777e
Showing 7 changed files with 197 additions and 36 deletions.
3 changes: 3 additions & 0 deletions .gitignore
@@ -2,6 +2,9 @@ __pycache__
*.ckpt
*.safetensors
*.pth
lena.png
lena_result.png
lena_test.py
!taesdxl_decoder.pth
/repositories
/venv
2 changes: 1 addition & 1 deletion fooocus_version.py
@@ -1 +1 @@
version = '1.0.35'
version = '1.0.36'
185 changes: 185 additions & 0 deletions modules/anisotropic.py
@@ -0,0 +1,185 @@
import torch


Tensor = torch.Tensor
Device = torch.DeviceObjType
Dtype = torch.Type
pad = torch.nn.functional.pad


def _compute_zero_padding(kernel_size: tuple[int, int] | int) -> tuple[int, int]:
    ky, kx = _unpack_2d_ks(kernel_size)
    return (ky - 1) // 2, (kx - 1) // 2


def _unpack_2d_ks(kernel_size: tuple[int, int] | int) -> tuple[int, int]:
    if isinstance(kernel_size, int):
        ky = kx = kernel_size
    else:
        assert len(kernel_size) == 2, '2D Kernel size should have a length of 2.'
        ky, kx = kernel_size

    ky = int(ky)
    kx = int(kx)
    return ky, kx


def gaussian(
    window_size: int, sigma: Tensor | float, *, device: Device | None = None, dtype: Dtype | None = None
) -> Tensor:

    batch_size = sigma.shape[0]

    x = (torch.arange(window_size, device=sigma.device, dtype=sigma.dtype) - window_size // 2).expand(batch_size, -1)

    if window_size % 2 == 0:
        x = x + 0.5

    gauss = torch.exp(-x.pow(2.0) / (2 * sigma.pow(2.0)))

    return gauss / gauss.sum(-1, keepdim=True)
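
# Note: despite the `Tensor | float` annotation, `gaussian` requires a 2-D sigma
# of shape (B, 1) -- a plain float would fail at `sigma.shape` -- and returns one
# normalized 1-D kernel per batch row (each row sums to 1).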


def get_gaussian_kernel1d(
    kernel_size: int,
    sigma: float | Tensor,
    force_even: bool = False,
    *,
    device: Device | None = None,
    dtype: Dtype | None = None,
) -> Tensor:

    return gaussian(kernel_size, sigma, device=device, dtype=dtype)


def get_gaussian_kernel2d(
    kernel_size: tuple[int, int] | int,
    sigma: tuple[float, float] | Tensor,
    force_even: bool = False,
    *,
    device: Device | None = None,
    dtype: Dtype | None = None,
) -> Tensor:

    sigma = torch.Tensor([[sigma, sigma]]).to(device=device, dtype=dtype)

    ksize_y, ksize_x = _unpack_2d_ks(kernel_size)
    sigma_y, sigma_x = sigma[:, 0, None], sigma[:, 1, None]

    kernel_y = get_gaussian_kernel1d(ksize_y, sigma_y, force_even, device=device, dtype=dtype)[..., None]
    kernel_x = get_gaussian_kernel1d(ksize_x, sigma_x, force_even, device=device, dtype=dtype)[..., None]

    # Outer product of the two 1-D kernels: (B, ky, 1) * (B, 1, kx) -> (B, ky, kx).
    return kernel_y * kernel_x.view(-1, 1, ksize_x)


def _bilateral_blur(
    input: Tensor,
    guidance: Tensor | None,
    kernel_size: tuple[int, int] | int,
    sigma_color: float | Tensor,
    sigma_space: tuple[float, float] | Tensor,
    border_type: str = 'reflect',
    color_distance_type: str = 'l1',
) -> Tensor:

    if isinstance(sigma_color, Tensor):
        sigma_color = sigma_color.to(device=input.device, dtype=input.dtype).view(-1, 1, 1, 1, 1)

    ky, kx = _unpack_2d_ks(kernel_size)
    pad_y, pad_x = _compute_zero_padding(kernel_size)

    padded_input = pad(input, (pad_x, pad_x, pad_y, pad_y), mode=border_type)
    unfolded_input = padded_input.unfold(2, ky, 1).unfold(3, kx, 1).flatten(-2)  # (B, C, H, W, Ky x Kx)

    if guidance is None:
        guidance = input
        unfolded_guidance = unfolded_input
    else:
        padded_guidance = pad(guidance, (pad_x, pad_x, pad_y, pad_y), mode=border_type)
        unfolded_guidance = padded_guidance.unfold(2, ky, 1).unfold(3, kx, 1).flatten(-2)  # (B, C, H, W, Ky x Kx)

    diff = unfolded_guidance - guidance.unsqueeze(-1)
    if color_distance_type == "l1":
        color_distance_sq = diff.abs().sum(1, keepdim=True).square()
    elif color_distance_type == "l2":
        color_distance_sq = diff.square().sum(1, keepdim=True)
    else:
        raise ValueError("color_distance_type only accepts l1 or l2")
    color_kernel = (-0.5 / sigma_color**2 * color_distance_sq).exp()  # (B, 1, H, W, Ky x Kx)

    space_kernel = get_gaussian_kernel2d(kernel_size, sigma_space, device=input.device, dtype=input.dtype)
    space_kernel = space_kernel.view(-1, 1, 1, 1, kx * ky)

    kernel = space_kernel * color_kernel
    out = (unfolded_input * kernel).sum(-1) / kernel.sum(-1)
    return out
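
# The result is a normalized weighted average over each ky*kx neighborhood:
# `space_kernel` weights taps by spatial distance while `color_kernel` weights
# them by similarity to the center pixel of `guidance`, so strong edges in the
# guidance image survive the blur.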


def bilateral_blur(
    input: Tensor,
    kernel_size: tuple[int, int] | int = (13, 13),
    sigma_color: float | Tensor = 3.0,
    sigma_space: tuple[float, float] | Tensor = 3.0,
    border_type: str = 'reflect',
    color_distance_type: str = 'l1',
) -> Tensor:
    return _bilateral_blur(input, None, kernel_size, sigma_color, sigma_space, border_type, color_distance_type)


def joint_bilateral_blur(
    input: Tensor,
    guidance: Tensor,
    kernel_size: tuple[int, int] | int,
    sigma_color: float | Tensor,
    sigma_space: tuple[float, float] | Tensor,
    border_type: str = 'reflect',
    color_distance_type: str = 'l1',
) -> Tensor:
    return _bilateral_blur(input, guidance, kernel_size, sigma_color, sigma_space, border_type, color_distance_type)


class _BilateralBlur(torch.nn.Module):
    def __init__(
        self,
        kernel_size: tuple[int, int] | int,
        sigma_color: float | Tensor,
        sigma_space: tuple[float, float] | Tensor,
        border_type: str = 'reflect',
        color_distance_type: str = "l1",
    ) -> None:
        super().__init__()
        self.kernel_size = kernel_size
        self.sigma_color = sigma_color
        self.sigma_space = sigma_space
        self.border_type = border_type
        self.color_distance_type = color_distance_type

    def __repr__(self) -> str:
        return (
            f"{self.__class__.__name__}"
            f"(kernel_size={self.kernel_size}, "
            f"sigma_color={self.sigma_color}, "
            f"sigma_space={self.sigma_space}, "
            f"border_type={self.border_type}, "
            f"color_distance_type={self.color_distance_type})"
        )


class BilateralBlur(_BilateralBlur):
    def forward(self, input: Tensor) -> Tensor:
        return bilateral_blur(
            input, self.kernel_size, self.sigma_color, self.sigma_space, self.border_type, self.color_distance_type
        )


class JointBilateralBlur(_BilateralBlur):
    def forward(self, input: Tensor, guidance: Tensor) -> Tensor:
        return joint_bilateral_blur(
            input,
            guidance,
            self.kernel_size,
            self.sigma_color,
            self.sigma_space,
            self.border_type,
            self.color_distance_type,
        )
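
A minimal usage sketch of the new module (an editor's illustration, not part of the commit; tensor shapes and values are assumed):

    import torch
    from modules.anisotropic import BilateralBlur, bilateral_blur

    x = torch.randn(1, 4, 64, 64)  # (B, C, H, W), e.g. an SDXL latent
    y = bilateral_blur(x)  # functional form, using the defaults shown above
    assert y.shape == x.shape  # output keeps the input shape

    blur = BilateralBlur(kernel_size=(13, 13), sigma_color=3.0, sigma_space=3.0)
    y2 = blur(x)  # module form with the same parameters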
32 changes: 0 additions & 32 deletions modules/filters.py

This file was deleted.

5 changes: 3 additions & 2 deletions modules/patch.py
@@ -2,10 +2,11 @@
import comfy.model_base
import comfy.ldm.modules.diffusionmodules.openaimodel
import comfy.samplers
import modules.anisotropic as anisotropic

from comfy.samplers import model_management, lcm, math
from comfy.ldm.modules.diffusionmodules.openaimodel import timestep_embedding, forward_timestep_embed
from modules.filters import gaussian_filter_2d


sharpness = 2.0

@@ -349,7 +350,7 @@ def unet_forward_patched(self, x, timesteps=None, context=None, y=None, control=

    alpha = 1.0 - (timesteps / 999.0)[:, None, None, None].clone()
    alpha *= 0.001 * sharpness
    degraded_x0 = gaussian_filter_2d(x0) * alpha + x0 * (1.0 - alpha)
    degraded_x0 = anisotropic.bilateral_blur(x0) * alpha + x0 * (1.0 - alpha)

    x0 = x0 * uc_mask + degraded_x0 * (1.0 - uc_mask)

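The hunk above is the actual kernel swap: Fooocus's adaptive sharpening blends x0 with a blurred copy of itself, gated by the CFG unconditional mask and a timestep-dependent alpha. A standalone sketch of that arithmetic, extracted for readability (the function wrapper is the editor's, not Fooocus's):

    import torch
    import modules.anisotropic as anisotropic

    def degrade(x0: torch.Tensor, timesteps: torch.Tensor, sharpness: float = 2.0) -> torch.Tensor:
        # alpha is near 0 early in sampling (timesteps close to 999) and grows
        # toward 0.001 * sharpness at the end, so the blur stays subtle throughout.
        alpha = 1.0 - (timesteps / 999.0)[:, None, None, None].clone()
        alpha *= 0.001 * sharpness
        return anisotropic.bilateral_blur(x0) * alpha + x0 * (1.0 - alpha)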
2 changes: 1 addition & 1 deletion readme.md
@@ -97,7 +97,7 @@ Note that some of these tricks are currently (2023 Aug 11) impossible to reproduc

1. Native refiner swap inside one single k-sampler. The advantage is that the refiner model can now reuse the base model's momentum (or ODE's history parameters) collected from k-sampling to achieve more coherent sampling. In Automatic1111's high-res fix and ComfyUI's node system, the base model and refiner use two independent k-samplers, which means the momentum is largely wasted and the sampling continuity is broken. Fooocus uses its own advanced k-diffusion sampling that ensures a seamless, native, and continuous swap in a refiner setup. (Update Aug 13: I discussed this with Automatic1111 several days ago, and it seems that the “native refiner swap inside one single k-sampler” is [merged](https://github.com/AUTOMATIC1111/stable-diffusion-webui/pull/12371) into the dev branch of webui. Great!)
2. Negative ADM guidance. Because the highest resolution level of XL Base has no cross attention, the positive and negative signals for XL's highest resolution level cannot receive enough contrast during CFG sampling, causing the results to look a bit plastic or overly smooth in certain cases. Fortunately, since XL's highest resolution level is still conditioned on image aspect ratios (ADM), we can modify the ADM on the positive/negative side to compensate for the lack of CFG contrast at the highest resolution level (a hypothetical sketch follows after this list). (Update Aug 16: the iOS app [Drawing Things](https://apps.apple.com/us/app/draw-things-ai-generation/id6444050820) will support Negative ADM Guidance. Great!)
3. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set very low, but this is Fooocus's final guarantee that XL will never yield an overly smooth or plastic appearance. This almost eliminates all cases in which XL still occasionally produces overly smooth results even with negative ADM guidance.
3. We implemented a carefully tuned variation of Section 5.1 of ["Improving Sample Quality of Diffusion Models Using Self-Attention Guidance"](https://arxiv.org/pdf/2210.00939.pdf). The weight is set very low, but this is Fooocus's final guarantee that XL will never yield an overly smooth or plastic appearance. This almost eliminates all cases in which XL still occasionally produces overly smooth results even with negative ADM guidance. (Update 2023 Aug 18: the Gaussian kernel of SAG is changed to an anisotropic kernel for better structure preservation and fewer artifacts.)
4. We modified the style templates a bit and added the "cinematic-default".
5. We tested the "sd_xl_offset_example-lora_1.0.safetensors" and it seems that when the lora weight is below 0.5, the results are always better than XL without lora.
6. The parameters of samplers are carefully tuned.
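
A hypothetical sketch of the negative ADM guidance idea from item 2; the function name and scale values are illustrative assumptions by the editor, not Fooocus's actual code:

    import torch

    def rescale_adm(adm_positive: torch.Tensor, adm_negative: torch.Tensor,
                    positive_scale: float = 1.5, negative_scale: float = 0.8):
        # Amplify the aspect-ratio (ADM) conditioning on the positive side and
        # damp it on the negative side so the cross-attention-free highest
        # resolution level still sees contrast between the two CFG branches.
        return adm_positive * positive_scale, adm_negative * negative_scale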
4 changes: 4 additions & 0 deletions update_log.md
@@ -1,3 +1,7 @@
### 1.0.36

* Change gaussian kernel to anisotropic kernel.

### 1.0.34

* Random seed restoring.
