mirror of
https://github.com/Hopiu/django-imagekit.git
synced 2026-03-16 21:30:23 +00:00
Remove PILKit functionality
This commit removes the functionality now in the PILKit project, and adds PILKit as a dependency. Import hooks have been used to expose the processors under "imagekit.processors".
This commit is contained in:
parent
d0ba353be3
commit
36313194ac
10 changed files with 40 additions and 981 deletions
|
|
@ -1,5 +1,5 @@
|
|||
# flake8: noqa
|
||||
|
||||
from . import importers
|
||||
from . import conf
|
||||
from . import generatorlibrary
|
||||
from .specs import ImageSpec
|
||||
|
|
|
|||
|
|
@ -1,3 +1,6 @@
|
|||
from pilkit.exceptions import UnknownExtension, UnknownFormat
|
||||
|
||||
|
||||
class AlreadyRegistered(Exception):
    """Signals that an item was registered more than once."""
|
||||
|
||||
|
|
@ -6,13 +9,10 @@ class NotRegistered(Exception):
|
|||
pass
|
||||
|
||||
|
||||
class UnknownExtensionError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class UnknownFormatError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
class MissingGeneratorId(Exception):
    """Signals that a generator was referenced without an id."""
|
||||
|
||||
|
||||
# Aliases for backwards compatibility
|
||||
UnknownExtensionError = UnknownExtension
|
||||
UnknownFormatError = UnknownFormat
|
||||
|
|
|
|||
29
imagekit/importers.py
Normal file
29
imagekit/importers.py
Normal file
|
|
@ -0,0 +1,29 @@
|
|||
import re
import sys
from importlib import import_module  # stdlib (2.7+); django.utils.importlib was removed in Django 1.9


class ProcessorImporter(object):
    """
    Import hook that proxies "imagekit.processors" to "pilkit.processors".

    The processors were moved to the PILKit project so they could be used
    separately from ImageKit (which has a bunch of Django dependencies).
    However, there's no real need to expose this fact (and we want to maintain
    backwards compatibility), so all "imagekit.processors" imports are proxied
    to "pilkit.processors" by installing this object on ``sys.meta_path``.
    """

    # Matches "imagekit.processors" and any of its submodules; group 1
    # captures the submodule suffix (including the leading dot) if present.
    pattern = re.compile(r'^imagekit\.processors((\..*)?)$')

    def find_module(self, name, path=None):
        # Claim the import only for names under imagekit.processors.
        if self.pattern.match(name):
            return self

    def load_module(self, name):
        # Respect the module cache, as the import protocol requires.
        if name in sys.modules:
            return sys.modules[name]

        # Rewrite the requested name onto pilkit and import that instead.
        new_name = self.pattern.sub(r'pilkit.processors\1', name)
        return import_module(new_name)


sys.meta_path.append(ProcessorImporter())
|
||||
|
|
@ -1,15 +0,0 @@
|
|||
# flake8: noqa
|
||||
|
||||
"""
|
||||
Imagekit image processors.
|
||||
|
||||
A processor accepts an image, does some stuff, and returns the result.
|
||||
Processors can do anything with the image you want, but their responsibilities
|
||||
should be limited to image manipulations--they should be completely decoupled
|
||||
from both the filesystem and the ORM.
|
||||
|
||||
"""
|
||||
|
||||
from .base import *
|
||||
from .crop import *
|
||||
from .resize import *
|
||||
|
|
@ -1,209 +0,0 @@
|
|||
from imagekit.lib import Image, ImageColor, ImageEnhance
|
||||
|
||||
|
||||
class ProcessorPipeline(list):
    """
    A :class:`list` of processors that can itself be used anywhere a single
    processor is expected; calling :meth:`process` runs the image through
    each member in order. For example::

        processed_image = ProcessorPipeline([ProcessorA(), ProcessorB()]).process(image)
    """

    def process(self, img):
        # Thread the image through every processor, in list order.
        result = img
        for step in self:
            result = step.process(result)
        return result
|
||||
|
||||
|
||||
class Adjust(object):
    """
    Performs color, brightness, contrast, and sharpness enhancements on the
    image. See :mod:`PIL.ImageEnhance` for more information.
    """

    def __init__(self, color=1.0, brightness=1.0, contrast=1.0, sharpness=1.0):
        """
        :param color: Saturation, 0 (grayscale) to 1 (original color).
            See :class:`PIL.ImageEnhance.Color`
        :param brightness: Brightness, 0 (black) to 1 (original).
            See :class:`PIL.ImageEnhance.Brightness`
        :param contrast: Contrast, 0 (solid gray) to 1 (original).
            See :class:`PIL.ImageEnhance.Contrast`
        :param sharpness: Sharpness; 0 blurs, 1 is the original, 2 sharpens.
            See :class:`PIL.ImageEnhance.Sharpness`
        """
        self.color = color
        self.brightness = brightness
        self.contrast = contrast
        self.sharpness = sharpness

    def process(self, img):
        original = img = img.convert('RGBA')
        for enhancer_name in ('Color', 'Brightness', 'Contrast', 'Sharpness'):
            amount = getattr(self, enhancer_name.lower())
            if amount == 1.0:
                # 1.0 means "leave unchanged"; skip the enhancer entirely.
                continue
            try:
                img = getattr(ImageEnhance, enhancer_name)(img).enhance(amount)
            except ValueError:
                pass
            else:
                # PIL's Color and Contrast filters both convert the image
                # to L mode, losing transparency info, so we put it back.
                # See https://github.com/jdriscoll/django-imagekit/issues/64
                if enhancer_name in ('Color', 'Contrast'):
                    img = Image.merge('RGBA',
                                      img.split()[:3] + original.split()[3:4])
        return img
|
||||
|
||||
|
||||
class Reflection(object):
    """
    Creates an image with a faded reflection appended below it.
    """

    def __init__(self, background_color='#FFFFFF', size=0.0, opacity=0.6):
        """
        :param background_color: The color the reflection fades into.
        :param size: Height of the reflection, as a fraction of the image
            height (0-1).
        :param opacity: Starting opacity of the reflection (0-1).
        """
        self.background_color = background_color
        self.size = size
        self.opacity = opacity

    def process(self, img):
        # Convert bgcolor string to RGB value.
        background_color = ImageColor.getrgb(self.background_color)
        # Handle paletted images.
        img = img.convert('RGBA')
        # Copy original image and flip the orientation.
        reflection = img.copy().transpose(Image.FLIP_TOP_BOTTOM)
        # Create a new image filled with the bgcolor the same size.
        background = Image.new("RGBA", img.size, background_color)
        # Calculate our alpha mask.
        start = int(255 - (255 * self.opacity))  # The start of our gradient.
        steps = int(255 * self.size)  # The number of intermediate values.
        # FIX: with the default size=0.0, steps is 0 and the original code
        # raised ZeroDivisionError. With no gradient rows the increment is
        # never read, so 0 is a safe placeholder.
        increment = (255 - start) / float(steps) if steps else 0
        mask = Image.new('L', (1, 255))
        for y in range(255):
            if y < steps:
                val = int(y * increment + start)
            else:
                val = 255
            mask.putpixel((0, y), val)
        alpha_mask = mask.resize(img.size)
        # Merge the reflection onto our background color using the alpha mask.
        reflection = Image.composite(background, reflection, alpha_mask)
        # Crop the reflection.
        reflection_height = int(img.size[1] * self.size)
        reflection = reflection.crop((0, 0, img.size[0], reflection_height))
        # Create new image sized to hold both the original image and
        # the reflection.
        composite = Image.new("RGBA", (img.size[0], img.size[1] + reflection_height), background_color)
        # Paste the original image and the reflection into the composite image.
        composite.paste(img, (0, 0))
        composite.paste(reflection, (0, img.size[1]))
        # Return the image complete with reflection effect.
        return composite
|
||||
|
||||
|
||||
class Transpose(object):
    """
    Rotates or flips the image.
    """

    AUTO = 'auto'
    FLIP_HORIZONTAL = Image.FLIP_LEFT_RIGHT
    FLIP_VERTICAL = Image.FLIP_TOP_BOTTOM
    ROTATE_90 = Image.ROTATE_90
    ROTATE_180 = Image.ROTATE_180
    ROTATE_270 = Image.ROTATE_270

    # Default when no arguments are given: rotate per EXIF orientation.
    methods = [AUTO]

    # Maps each EXIF 0x0112 orientation value to the transposition steps
    # that bring the image upright.
    _EXIF_ORIENTATION_STEPS = {
        1: [],
        2: [FLIP_HORIZONTAL],
        3: [ROTATE_180],
        4: [FLIP_VERTICAL],
        5: [ROTATE_270, FLIP_HORIZONTAL],
        6: [ROTATE_270],
        7: [ROTATE_90, FLIP_HORIZONTAL],
        8: [ROTATE_90],
    }

    def __init__(self, *args):
        """
        Accepts any sequence of ``Transpose.AUTO``, ``FLIP_HORIZONTAL``,
        ``FLIP_VERTICAL``, ``ROTATE_90``, ``ROTATE_180``, ``ROTATE_270``.

        The order of the arguments dictates the order in which the
        transposition steps are taken. If ``Transpose.AUTO`` is present, all
        other arguments are ignored and the processor attempts to rotate the
        image according to its EXIF Orientation data.
        """
        super(Transpose, self).__init__()
        if args:
            self.methods = args

    def process(self, img):
        if self.AUTO in self.methods:
            try:
                orientation = img._getexif()[0x0112]
                steps = self._EXIF_ORIENTATION_STEPS[orientation]
            except (KeyError, TypeError, AttributeError):
                # Missing/invalid EXIF data: leave the image untouched.
                steps = []
        else:
            steps = self.methods
        for step in steps:
            img = img.transpose(step)
        return img
|
||||
|
||||
|
||||
class Anchor(object):
    """
    Defines all the anchor points needed by the various processor classes,
    plus a helper to normalize them to percentage tuples.
    """

    TOP_LEFT = 'tl'
    TOP = 't'
    TOP_RIGHT = 'tr'
    BOTTOM_LEFT = 'bl'
    BOTTOM = 'b'
    BOTTOM_RIGHT = 'br'
    CENTER = 'c'
    LEFT = 'l'
    RIGHT = 'r'

    # Each named anchor as an (x, y) pair of percentages, CSS-style.
    _ANCHOR_PTS = {
        TOP_LEFT: (0, 0),
        TOP: (0.5, 0),
        TOP_RIGHT: (1, 0),
        LEFT: (0, 0.5),
        CENTER: (0.5, 0.5),
        RIGHT: (1, 0.5),
        BOTTOM_LEFT: (0, 1),
        BOTTOM: (0.5, 1),
        BOTTOM_RIGHT: (1, 1),
    }

    @staticmethod
    def get_tuple(anchor):
        """Normalizes anchor values (strings or tuples) to tuples."""
        # Named anchors become their percentage pair; anything else is
        # passed through unchanged.
        if anchor in Anchor._ANCHOR_PTS.keys():
            return Anchor._ANCHOR_PTS[anchor]
        return anchor
|
||||
|
|
@ -1,170 +0,0 @@
|
|||
from .base import Anchor # noqa
|
||||
from .utils import histogram_entropy
|
||||
from ..lib import Image, ImageChops, ImageDraw, ImageStat
|
||||
|
||||
|
||||
class Side(object):
|
||||
TOP = 't'
|
||||
RIGHT = 'r'
|
||||
BOTTOM = 'b'
|
||||
LEFT = 'l'
|
||||
ALL = (TOP, RIGHT, BOTTOM, LEFT)
|
||||
|
||||
|
||||
def _crop(img, bbox, sides=Side.ALL):
|
||||
bbox = (
|
||||
bbox[0] if Side.LEFT in sides else 0,
|
||||
bbox[1] if Side.TOP in sides else 0,
|
||||
bbox[2] if Side.RIGHT in sides else img.size[0],
|
||||
bbox[3] if Side.BOTTOM in sides else img.size[1],
|
||||
)
|
||||
return img.crop(bbox)
|
||||
|
||||
|
||||
def detect_border_color(img):
    """
    Guess the image's border color: the median of the pixels in the
    one-pixel-wide frame around the image's edge.
    """
    border_mask = Image.new('1', img.size, 1)
    inner_w, inner_h = img.size[0] - 2, img.size[1] - 2
    if inner_w > 0 and inner_h > 0:
        # Zero out everything except the outermost one-pixel frame.
        ImageDraw.Draw(border_mask).rectangle([1, 1, inner_w, inner_h], 0)
    return ImageStat.Stat(img.convert('RGBA').histogram(border_mask)).median
|
||||
|
||||
|
||||
class TrimBorderColor(object):
    """Trims a color from the sides of an image."""

    def __init__(self, color=None, tolerance=0.3, sides=Side.ALL):
        """
        :param color: The color to trim from the image, as a 4-tuple RGBA
            value with each component between 0 and 255 inclusive. When
            omitted, the processor attempts to detect the border color
            automatically.
        :param tolerance: A number between 0 and 1; zero is the least
            tolerant and one is the most.
        :param sides: The sides that should be trimmed, drawn from the
            :class:`Side` enum class.
        """
        self.color = color
        self.sides = sides
        self.tolerance = tolerance

    def process(self, img):
        rgba = img.convert('RGBA')
        trim_color = self.color or tuple(detect_border_color(rgba))
        background = Image.new('RGBA', img.size, trim_color)
        diff = ImageChops.difference(rgba, background)
        if self.tolerance not in (0, 1):
            # A tolerance of zero needs no thresholding; a tolerance of one
            # would trim EVERY color (yielding a zero-sized image), so it is
            # ignored as well.
            if not 0 <= self.tolerance <= 1:
                raise ValueError('%s is an invalid tolerance. Acceptable values'
                        ' are between 0 and 1 (inclusive).' % self.tolerance)
            threshold = ImageChops.constant(diff, int(self.tolerance * 255)) \
                    .convert('RGBA')
            diff = ImageChops.subtract(diff, threshold)

        bbox = diff.getbbox()
        return _crop(img, bbox, self.sides) if bbox else img
|
||||
|
||||
|
||||
class Crop(object):
    """
    Crops an image to the specified width and height. You may optionally
    provide either an anchor or x and y coordinates. Behaves exactly like
    ``ResizeCanvas`` except that it will never enlarge the image.
    """

    def __init__(self, width=None, height=None, anchor=None, x=None, y=None):
        self.width = width
        self.height = height
        self.anchor = anchor
        self.x = x
        self.y = y

    def process(self, img):
        from .resize import ResizeCanvas

        current_w, current_h = img.size
        # Never exceed the current dimensions, so the image is only cropped.
        target_w = min(current_w, self.width)
        target_h = min(current_h, self.height)
        return ResizeCanvas(target_w, target_h, anchor=self.anchor,
                            x=self.x, y=self.y).process(img)
|
||||
|
||||
|
||||
class SmartCrop(object):
    """
    Crop an image to the specified dimensions, whittling away the parts of
    the image with the least entropy.

    Based on smart crop implementation from easy-thumbnails:
    https://github.com/SmileyChris/easy-thumbnails/blob/master/easy_thumbnails/processors.py#L193
    """

    def __init__(self, width=None, height=None):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        """
        self.width = width
        self.height = height

    def compare_entropy(self, start_slice, end_slice, slice, difference):
        """
        Calculate the entropy of two slices (from the start and end of an
        axis), returning a tuple of the amount to add to the start and
        remove from the end of the axis.
        """
        start_entropy = histogram_entropy(start_slice)
        end_entropy = histogram_entropy(end_slice)

        if end_entropy and abs(start_entropy / end_entropy - 1) < 0.01:
            # Less than 1% difference, remove from both sides.
            if difference >= slice * 2:
                return slice, slice
            half = slice // 2
            return half, slice - half

        # Trim entirely from whichever side carries less information.
        if start_entropy > end_entropy:
            return 0, slice
        return slice, 0

    def process(self, img):
        src_w, src_h = img.size
        surplus_x = int(src_w - min(src_w, self.width))
        surplus_y = int(src_h - min(src_h, self.height))
        left = top = 0
        right, bottom = src_w, src_h

        # Shave vertical strips from the left/right until the width fits.
        while surplus_x:
            chunk = min(surplus_x, max(surplus_x // 5, 10))
            start = img.crop((left, 0, left + chunk, src_h))
            end = img.crop((right - chunk, 0, right, src_h))
            add, remove = self.compare_entropy(start, end, chunk, surplus_x)
            left += add
            right -= remove
            surplus_x = surplus_x - add - remove

        # Shave horizontal strips from the top/bottom until the height fits.
        while surplus_y:
            chunk = min(surplus_y, max(surplus_y // 5, 10))
            start = img.crop((0, top, src_w, top + chunk))
            end = img.crop((0, bottom - chunk, src_w, bottom))
            add, remove = self.compare_entropy(start, end, chunk, surplus_y)
            top += add
            bottom -= remove
            surplus_y = surplus_y - add - remove

        return img.crop((left, top, right, bottom))
|
||||
|
|
@ -1,260 +0,0 @@
|
|||
from imagekit.lib import Image
|
||||
from .base import Anchor
|
||||
|
||||
|
||||
class Resize(object):
    """
    Resizes an image to exactly the specified width and height.
    """

    def __init__(self, width, height):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        """
        self.width = width
        self.height = height

    def process(self, img):
        # ANTIALIAS is PIL's highest-quality resampling filter.
        return img.resize((self.width, self.height), Image.ANTIALIAS)
|
||||
|
||||
|
||||
class ResizeToCover(object):
    """
    Resizes the image to the smallest possible size that will entirely cover
    the provided dimensions. You probably won't be using this processor
    directly, but it's used internally by ``ResizeToFill`` and
    ``SmartResize``.
    """

    def __init__(self, width, height):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        """
        self.width, self.height = width, height

    def process(self, img):
        orig_w, orig_h = img.size
        # Scale by whichever axis needs the larger factor so both target
        # dimensions end up fully covered.
        scale = max(float(self.width) / orig_w,
                    float(self.height) / orig_h)
        return Resize(int(orig_w * scale), int(orig_h * scale)).process(img)
|
||||
|
||||
|
||||
class ResizeToFill(object):
    """
    Resizes an image, cropping it to the exact specified width and height.
    """

    def __init__(self, width=None, height=None, anchor=None):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        :param anchor: Specifies which part of the image should be retained
            when cropping.
        """
        self.width = width
        self.height = height
        self.anchor = anchor

    def process(self, img):
        from .crop import Crop
        # Cover the target box first, then crop away the overflow.
        covered = ResizeToCover(self.width, self.height).process(img)
        return Crop(self.width, self.height,
                    anchor=self.anchor).process(covered)
|
||||
|
||||
|
||||
class SmartResize(object):
    """
    Identical to ``ResizeToFill``, except that the crop is chosen by image
    entropy instead of a user-specified anchor point. Internally it simply
    runs ``ResizeToCover`` and ``SmartCrop`` in series.
    """

    def __init__(self, width, height):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        """
        self.width, self.height = width, height

    def process(self, img):
        from .crop import SmartCrop
        covered = ResizeToCover(self.width, self.height).process(img)
        return SmartCrop(self.width, self.height).process(covered)
|
||||
|
||||
|
||||
class ResizeCanvas(object):
    """
    Resizes the canvas, using the provided background color if the new size
    is larger than the current image.
    """

    def __init__(self, width, height, color=None, anchor=None, x=None, y=None):
        """
        :param width: The target width, in pixels.
        :param height: The target height, in pixels.
        :param color: The background color to use for padding.
        :param anchor: Specifies the position of the original image on the
            new canvas. Valid values are the ``Anchor.*`` constants
            (``TOP_LEFT``, ``TOP``, ``TOP_RIGHT``, ``LEFT``, ``CENTER``,
            ``RIGHT``, ``BOTTOM_LEFT``, ``BOTTOM``, ``BOTTOM_RIGHT``), or a
            tuple of percentages: ``(0, 0)`` is "top left", ``(0.5, 0.5)``
            "center", ``(1, 1)`` "bottom right" — like CSS background
            positions.
        :param x: Explicit x offset (mutually exclusive with ``anchor``).
        :param y: Explicit y offset (mutually exclusive with ``anchor``).
        """
        if x is None and y is None:
            self.anchor = anchor or Anchor.CENTER
            self.x = self.y = None
        else:
            if anchor:
                raise Exception('You may provide either an anchor or x and y'
                        ' coordinate, but not both.')
            self.x, self.y = x or 0, y or 0
            self.anchor = None

        self.width = width
        self.height = height
        # Default padding is fully transparent white.
        self.color = color or (255, 255, 255, 0)

    def process(self, img):
        orig_w, orig_h = img.size

        if self.anchor:
            anchor_x, anchor_y = Anchor.get_tuple(self.anchor)
            # Distribute the size difference according to the anchor
            # percentages (negative when the canvas shrinks).
            x = int(float(self.width - orig_w) * float(anchor_x))
            y = int(float(self.height - orig_h) * float(anchor_y))
        else:
            x, y = self.x, self.y

        canvas = Image.new('RGBA', (self.width, self.height), self.color)
        canvas.paste(img, (x, y))
        return canvas
|
||||
|
||||
|
||||
class AddBorder(object):
    """
    Add a border of specific color and size to an image.
    """

    def __init__(self, thickness, color=None):
        """
        :param thickness: Thickness of the border. Either an int (applied to
            all four sides) or a 4-tuple of ints ``(top, right, bottom, left)``.
        :param color: Color to use for the border.
        """
        self.color = color
        if isinstance(thickness, int):
            self.top = self.right = self.bottom = self.left = thickness
        else:
            self.top, self.right, self.bottom, self.left = thickness

    def process(self, img):
        w, h = img.size
        # Grow the canvas and offset the image by the left/top border.
        return ResizeCanvas(w + self.left + self.right,
                            h + self.top + self.bottom,
                            color=self.color,
                            x=self.left, y=self.top).process(img)
|
||||
|
||||
|
||||
class ResizeToFit(object):
    """
    Resizes an image to fit within the specified dimensions.
    """

    def __init__(self, width=None, height=None, upscale=None, mat_color=None, anchor=Anchor.CENTER):
        """
        :param width: The maximum width of the desired image.
        :param height: The maximum height of the desired image.
        :param upscale: A boolean value specifying whether the image should
            be enlarged if its dimensions are smaller than the target
            dimensions.
        :param mat_color: If set, the target image size will be enforced and
            the specified color will be used as a background color to pad the
            image.
        :param anchor: Where to place the image on the mat when padding.
        """
        self.width = width
        self.height = height
        self.upscale = upscale
        self.mat_color = mat_color
        self.anchor = anchor

    def process(self, img):
        cur_width, cur_height = img.size
        # FIX: replaced the non-idiomatic ``not x is None`` with
        # ``x is not None`` (PEP 8); behavior is unchanged.
        if self.width is not None and self.height is not None:
            # Both bounds given: fit inside the box.
            ratio = min(float(self.width) / cur_width,
                        float(self.height) / cur_height)
        elif self.width is None:
            ratio = float(self.height) / cur_height
        else:
            ratio = float(self.width) / cur_width
        new_dimensions = (int(round(cur_width * ratio)),
                          int(round(cur_height * ratio)))
        # Only shrink unless upscaling was explicitly requested.
        if (cur_width > new_dimensions[0] or cur_height > new_dimensions[1]) or self.upscale:
            img = Resize(new_dimensions[0], new_dimensions[1]).process(img)
        if self.mat_color is not None:
            img = ResizeCanvas(self.width, self.height, self.mat_color, anchor=self.anchor).process(img)
        return img
|
||||
|
||||
|
||||
class Thumbnail(object):
    """
    Resize the image for use as a thumbnail. Wraps ``ResizeToFill``,
    ``ResizeToFit``, and ``SmartResize``.

    Note: while it doesn't currently, in the future this processor may also
    sharpen based on the amount of reduction.
    """

    def __init__(self, width=None, height=None, anchor=None, crop=None):
        self.width = width
        self.height = height
        if anchor:
            if crop is False:
                raise Exception("You can't specify an anchor point if crop is False.")
            crop = True
        elif crop is None:
            # Assume we are cropping if both a width and height are provided.
            # If only one is, we must be resizing to fit.
            crop = width is not None and height is not None
        # A default anchor if cropping.
        if crop and anchor is None:
            anchor = 'auto'
        self.crop = crop
        self.anchor = anchor

    def process(self, img):
        if not self.crop:
            return ResizeToFit(self.width, self.height).process(img)
        if not self.width or not self.height:
            raise Exception('You must provide both a width and height when'
                    ' cropping.')
        if self.anchor == 'auto':
            return SmartResize(self.width, self.height).process(img)
        return ResizeToFill(self.width, self.height, self.anchor).process(img)
|
||||
|
|
@ -1,18 +0,0 @@
|
|||
import math
|
||||
from imagekit.lib import Image
|
||||
|
||||
|
||||
def histogram_entropy(im):
    """
    Calculate the entropy of an image's histogram. Used for "smart cropping"
    in easy-thumbnails; see:
    https://raw.github.com/SmileyChris/easy-thumbnails/master/easy_thumbnails/utils.py
    """
    if not isinstance(im, Image.Image):
        return 0  # Fall back to a constant entropy.

    counts = im.histogram()
    total = float(sum(counts))
    probabilities = [count / total for count in counts]

    # Shannon entropy over the non-zero histogram bins.
    return -sum(p * math.log(p, 2) for p in probabilities if p != 0)
|
||||
|
|
@ -1,26 +1,11 @@
|
|||
import logging
|
||||
import os
|
||||
import mimetypes
|
||||
import sys
|
||||
from tempfile import NamedTemporaryFile
|
||||
import types
|
||||
|
||||
from django.core.exceptions import ImproperlyConfigured
|
||||
from django.core.files import File
|
||||
from django.db.models.loading import cache
|
||||
from django.utils.functional import wraps
|
||||
from django.utils.importlib import import_module
|
||||
|
||||
from .exceptions import UnknownExtensionError, UnknownFormatError
|
||||
from .lib import Image, ImageFile, StringIO
|
||||
|
||||
|
||||
RGBA_TRANSPARENCY_FORMATS = ['PNG']
|
||||
PALETTE_TRANSPARENCY_FORMATS = ['PNG', 'GIF']
|
||||
|
||||
|
||||
def img_to_fobj(img, format, autoconvert=True, **options):
    """Serialize ``img`` to an in-memory file object in the given format."""
    buf = StringIO()
    return save_image(img, buf, format, options, autoconvert)
|
||||
from pilkit.utils import *
|
||||
|
||||
|
||||
def get_spec_files(instance):
|
||||
|
|
@ -30,116 +15,6 @@ def get_spec_files(instance):
|
|||
return []
|
||||
|
||||
|
||||
def open_image(target):
    """
    Open ``target`` (a file-like object) as a PIL image whose ``copy()``
    method also carries over app/EXIF data (see ``_wrap_copy``).
    """
    target.seek(0)
    img = Image.open(target)
    # Rebind copy() as a bound method (Python 2 three-argument form) so
    # copies keep the original's metadata.
    img.copy = types.MethodType(_wrap_copy(img.copy), img, img.__class__)
    return img
|
||||
|
||||
|
||||
def _wrap_copy(f):
|
||||
@wraps(f)
|
||||
def copy(self):
|
||||
img = f()
|
||||
try:
|
||||
img.app = self.app
|
||||
except AttributeError:
|
||||
pass
|
||||
try:
|
||||
img._getexif = self._getexif
|
||||
except AttributeError:
|
||||
pass
|
||||
return img
|
||||
return copy
|
||||
|
||||
|
||||
_pil_init = 0
|
||||
|
||||
|
||||
def _preinit_pil():
|
||||
"""Loads the standard PIL file format drivers. Returns True if ``preinit()``
|
||||
was called (and there's a potential that more drivers were loaded) or False
|
||||
if there is no possibility that new drivers were loaded.
|
||||
|
||||
"""
|
||||
global _pil_init
|
||||
if _pil_init < 1:
|
||||
Image.preinit()
|
||||
_pil_init = 1
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _init_pil():
|
||||
"""Loads all PIL file format drivers. Returns True if ``init()`` was called
|
||||
(and there's a potential that more drivers were loaded) or False if there is
|
||||
no possibility that new drivers were loaded.
|
||||
|
||||
"""
|
||||
global _pil_init
|
||||
_preinit_pil()
|
||||
if _pil_init < 2:
|
||||
Image.init()
|
||||
_pil_init = 2
|
||||
return True
|
||||
return False
|
||||
|
||||
|
||||
def _extension_to_format(extension):
    """Look up the PIL format registered for ``extension``
    (case-insensitive), or None if it isn't registered."""
    return Image.EXTENSION.get(extension.lower())


def _format_to_extension(format):
    """Return the first registered extension mapping to ``format``, or None.

    FIX: the original used ``dict.iteritems()``, which does not exist on
    Python 3; ``items()`` behaves identically here on both versions.
    """
    if format:
        for k, v in Image.EXTENSION.items():
            if v == format.upper():
                return k
    return None
|
||||
|
||||
|
||||
def extension_to_mimetype(ext):
    """Guess the mimetype for a file extension (e.g. ``'.jpg'``), or None."""
    # guess_type() requires a full filename, not just an extension.
    fake_filename = 'a%s' % (ext or '')
    try:
        return mimetypes.guess_type(fake_filename)[0]
    except IndexError:
        return None
|
||||
|
||||
|
||||
def format_to_mimetype(format):
    """Guess the mimetype corresponding to a PIL format name."""
    ext = format_to_extension(format)
    return extension_to_mimetype(ext)
|
||||
|
||||
|
||||
def extension_to_format(extension):
    """Returns the format that corresponds to the provided extension.

    PIL's driver registry is loaded lazily, so the lookup is retried after
    each stage of driver loading. Raises ``UnknownExtensionError`` if no
    driver recognizes the extension.
    """
    for load_more in (None, _preinit_pil, _init_pil):
        # Skip the retry when no new drivers could have been loaded.
        if load_more is not None and not load_more():
            continue
        format = _extension_to_format(extension)
        if format:
            return format
    raise UnknownExtensionError(extension)
|
||||
|
||||
|
||||
def format_to_extension(format):
    """Returns the first extension that matches the provided format.

    PIL's driver registry is loaded lazily, so the lookup is retried after
    each stage of driver loading. Raises ``UnknownFormatError`` if no
    registered driver produces the format (or if ``format`` is falsy).
    """
    if format:
        for load_more in (None, _preinit_pil, _init_pil):
            # Skip the retry when no new drivers could have been loaded.
            if load_more is not None and not load_more():
                continue
            extension = _format_to_extension(format)
            if extension:
                return extension
    raise UnknownFormatError(format)
||||
|
||||
|
||||
def _get_models(apps):
|
||||
models = []
|
||||
for app_label in apps or []:
|
||||
|
|
@ -148,180 +23,6 @@ def _get_models(apps):
|
|||
return models
|
||||
|
||||
|
||||
def suggest_extension(name, format):
    """
    Pick a file extension for ``name`` when saved as ``format``: keep the
    original extension whenever it's compatible with the format, otherwise
    use the extension PIL suggests for that format.
    """
    original_extension = os.path.splitext(name)[1]
    try:
        suggested_extension = format_to_extension(format)
    except UnknownFormatError:
        return original_extension
    if suggested_extension.lower() == original_extension.lower():
        return original_extension
    try:
        original_format = extension_to_format(original_extension)
    except UnknownExtensionError:
        return suggested_extension
    # If the formats match, give precedence to the original extension.
    if format.lower() == original_format.lower():
        return original_extension
    return suggested_extension
|
||||
|
||||
|
||||
def save_image(img, outfile, format, options=None, autoconvert=True):
    """
    Wraps PIL's ``Image.save()`` method. There are two main benefits of using
    this function over PIL's:

    1. It gracefully handles the infamous "Suspension not allowed here" errors.
    2. It prepares the image for saving using ``prepare_image()``, which will do
       some common-sense processing given the target format.

    :param img: The PIL image to save.
    :param outfile: A file-like object to write to (its pointer is reset
        before and after writing when possible).
    :param format: The PIL format name to save as.
    :param options: Extra keyword options passed through to ``Image.save()``;
        they take precedence over the prepared defaults.
    :param autoconvert: Whether to run ``prepare_image()`` first.
    """
    options = options or {}

    if autoconvert:
        img, save_kwargs = prepare_image(img, format)
        # FIX: ``dict(save_kwargs.items() + options.items())`` only works on
        # Python 2 (dict views can't be concatenated on Python 3). Copy then
        # update, which preserves the original precedence: caller-supplied
        # options override the prepared defaults.
        merged = dict(save_kwargs)
        merged.update(options)
        options = merged

    # Attempt to reset the file pointer.
    try:
        outfile.seek(0)
    except AttributeError:
        pass

    try:
        with quiet():
            img.save(outfile, format, **options)
    except IOError:
        # PIL can have problems saving large JPEGs if MAXBLOCK isn't big enough,
        # So if we have a problem saving, we temporarily increase it. See
        # http://github.com/jdriscoll/django-imagekit/issues/50
        old_maxblock = ImageFile.MAXBLOCK
        ImageFile.MAXBLOCK = img.size[0] * img.size[1]
        try:
            img.save(outfile, format, **options)
        finally:
            ImageFile.MAXBLOCK = old_maxblock

    try:
        outfile.seek(0)
    except AttributeError:
        pass

    return outfile
|
||||
|
||||
|
||||
class quiet(object):
    """
    A context manager for suppressing the stderr activity of PIL's C libraries.
    Based on http://stackoverflow.com/a/978264/155370

    """

    def __enter__(self):
        # Work on the real (process-level) stderr descriptor, not any Python
        # wrapper that may have replaced ``sys.stderr``: C libraries write
        # straight to the fd. Keep a duplicate so it can be restored later,
        # then redirect the descriptor to the null device.
        self.stderr_fd = sys.__stderr__.fileno()
        self.null_fd = os.open(os.devnull, os.O_RDWR)
        self.old = os.dup(self.stderr_fd)
        os.dup2(self.null_fd, self.stderr_fd)

    def __exit__(self, *args, **kwargs):
        # Point stderr back at its saved duplicate, then release both helper
        # descriptors.
        os.dup2(self.old, self.stderr_fd)
        for descriptor in (self.null_fd, self.old):
            os.close(descriptor)
|
||||
|
||||
|
||||
def prepare_image(img, format):
    """
    Prepares the image for saving to the provided format by doing some
    common-sense conversions. This includes things like preserving transparency
    and quantizing. This function is used automatically by ``save_image()``
    (and classes like ``ImageSpecField`` and ``ProcessedImageField``)
    immediately before saving unless you specify ``autoconvert=False``. It is
    provided as a utility for those doing their own processing.

    :param img: The image to prepare for saving.
    :param format: The format that the image will be saved to.
    :returns: A 2-tuple of the (possibly converted) image and a dict of extra
        keyword arguments to pass to ``Image.save()`` (e.g. a palette
        ``transparency`` index, or ``optimize`` for JPEGs).

    """
    # ``matte`` records that the image must be flattened onto a white
    # background at the end (set when transparency can't be preserved).
    matte = False
    save_kwargs = {}

    if img.mode == 'RGBA':
        if format in RGBA_TRANSPARENCY_FORMATS:
            # The target format keeps full alpha; nothing to do.
            pass
        elif format in PALETTE_TRANSPARENCY_FORMATS:
            # If you're going from a format with alpha transparency to one
            # with palette transparency, transparency values will be
            # snapped: pixels that are more opaque than not will become
            # fully opaque; pixels that are more transparent than not will
            # become fully transparent. This will not produce a good-looking
            # result if your image contains varying levels of opacity; in
            # that case, you'll probably want to use a processor to matte
            # the image on a solid color. The reason we don't matte by
            # default is because not doing so allows processors to treat
            # RGBA-format images as a super-type of P-format images: if you
            # have an RGBA-format image with only a single transparent
            # color, and save it as a GIF, it will retain its transparency.
            # In other words, a P-format image converted to an
            # RGBA-formatted image by a processor and then saved as a
            # P-format image will give the expected results.

            # Work around a bug in PIL: split() doesn't check to see if
            # img is loaded.
            img.load()

            # Build a mask of pixels that are mostly transparent (alpha at or
            # below the midpoint), quantize to 255 colors so palette index
            # 255 stays free, then paint the masked pixels with that index
            # and declare it the transparent color.
            alpha = img.split()[-1]
            mask = Image.eval(alpha, lambda a: 255 if a <= 128 else 0)
            img = img.convert('RGB').convert('P', palette=Image.ADAPTIVE,
                                             colors=255)
            img.paste(255, mask)
            save_kwargs['transparency'] = 255
        else:
            # Simply converting an RGBA-format image to an RGB one creates a
            # gross result, so we matte the image on a white background. If
            # that's not what you want, that's fine: use a processor to deal
            # with the transparency however you want. This is simply a
            # sensible default that will always produce something that looks
            # good. Or at least, it will look better than just a straight
            # conversion.
            matte = True
    elif img.mode == 'P':
        if format in PALETTE_TRANSPARENCY_FORMATS:
            # Carry the source's transparent palette index through to the
            # saved file, if it has one.
            try:
                save_kwargs['transparency'] = img.info['transparency']
            except KeyError:
                pass
        elif format in RGBA_TRANSPARENCY_FORMATS:
            # Currently PIL doesn't support any RGBA-mode formats that
            # aren't also P-mode formats, so this will never happen.
            img = img.convert('RGBA')
        else:
            matte = True
    else:
        img = img.convert('RGB')

        # GIFs are always going to be in palette mode, so we can do a little
        # optimization. Note that the RGBA sources also use adaptive
        # quantization (above). Images that are already in P mode don't need
        # any quantization because their colors are already limited.
        if format == 'GIF':
            img = img.convert('P', palette=Image.ADAPTIVE)

    if matte:
        # Flatten onto an opaque white background, then drop the alpha
        # channel.
        img = img.convert('RGBA')
        bg = Image.new('RGBA', img.size, (255, 255, 255))
        bg.paste(img, img)
        img = bg.convert('RGB')

    if format == 'JPEG':
        # Ask the JPEG encoder to pick optimal settings for a smaller file.
        save_kwargs['optimize'] = True

    return img, save_kwargs
|
||||
|
||||
|
||||
def get_by_qname(path, desc):
|
||||
try:
|
||||
dot = path.rindex('.')
|
||||
|
|
|
|||
1
setup.py
1
setup.py
|
|
@ -50,6 +50,7 @@ setup(
|
|||
test_suite='testrunner.run_tests',
|
||||
install_requires=[
|
||||
'django-appconf>=0.5',
|
||||
'pilkit<2.0a0',
|
||||
],
|
||||
classifiers=[
|
||||
'Development Status :: 5 - Production/Stable',
|
||||
|
|
|
|||
Loading…
Reference in a new issue