Last active
July 18, 2018 03:43
-
-
Save BlogBlocks/0cb52687db00d505d83fd3aff4156d38 to your computer and use it in GitHub Desktop.
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| { | |
| "cells": [ | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "http://www.bogotobogo.com/python/OpenCV_Python/python_opencv3_basic_image_operations_pixel_access_image_load.php" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from PIL import Image, ImageFont, ImageDraw\n", | |
| "from PIL import ImageEnhance\n", | |
| "import cv2\n", | |
| "\n", | |
| "img1 = Image.new(\"RGBA\", (500, 500), color=(0, 10, 125, 125))\n", | |
| "img1" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "execute_result", | |
| "execution_count": 22, | |
| "data": { | |
| "image/png": [ | |
| "iVBORw0KGgoAAAANSUhEUgAAAfQAAAH0CAYAAADL1t+KAAAH8klEQVR4nO3VMRHAMBDAsDRjOT/3\n", | |
| "gsiQq09C4M3PemcWAPBr+3YAAHDO0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEg\n", | |
| "wNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA\n", | |
| "0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQ\n", | |
| "ASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNABIMDQASDA0AEgwNAB\n", | |
| "IMDQASDA0AEgwNABIMDQASDgA2PxBOxxccoyAAAAAElFTkSuQmCC\n" | |
| ], | |
| "text/plain": [ | |
| "<PIL.Image.Image image mode=RGBA size=500x500 at 0x7F1038079C50>" | |
| ] | |
| }, | |
| "metadata": {} | |
| } | |
| ], | |
| "execution_count": 22, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from PIL import Image, ImageFont, ImageDraw\n", | |
| "from PIL import ImageEnhance\n", | |
| "import cv2\n", | |
| "\n", | |
| "img1 = Image.new(\"RGBA\", (675, 640), color=(0, 0, 125, 5))\n", | |
| "dr1 = ImageDraw.Draw(img1)\n", | |
| "fnt = ImageFont.truetype(\"/home/jack/.fonts/LuckiestGuy.ttf\",35)\n", | |
| "dr1.text((230, 15), \"Three Images Combined\", font=fnt, fill=(255, 255, 0, 128))\n", | |
| "img1.save('images/test_out.png')\n", | |
| "\n", | |
| "r1 = cv2.imread(\"images/S-hicks01.jpg\")\n", | |
| "r2 = cv2.imread(\"images/test_out.png\")\n", | |
| "r3 = cv2.imread(\"images/waves.jpg\")\n", | |
| "r4 = cv2.imread(\"images/S-hicks02.jpg\")\n", | |
| "\n", | |
| "r1 = r1 * 1.5\n", | |
| "r2 = r2 *.2\n", | |
| "r3 = r3 *.4\n", | |
| "r4 = r4 \n", | |
| "\n", | |
| "to = r1+r2+r3+r4 \n", | |
| "ave = (to/4)-40\n", | |
| "\n", | |
| "cv2.imwrite(\"images/hicks-mask2.png\", ave)\n", | |
| "\n", | |
| "def change_contrast(img, level):\n", | |
| " factor = (259 * (level + 255)) / (255 * (259 - level))\n", | |
| " def contrast(c):\n", | |
| " return 128 + factor * (c - 50)\n", | |
| " return img.point(contrast)\n", | |
| "\n", | |
| "nimC = change_contrast(Image.open(\"images/hicks-mask2.png\"),120)\n", | |
| "nimC.save(\"images/hicks-mask4.png\")\n", | |
| "!showme images/hicks-mask4.png\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": 25, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from skimage import data, color, io, img_as_float\n", | |
| "import numpy as np\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "from PIL import Image \n", | |
| "import cv2\n", | |
| "#alpha = 0.6\n", | |
| "#img1 = Image.new(\"RGBA\", (675, 640), color=(0, 0, 125, 5))\n", | |
| "#img1.save(\"images/img1-img1.png\")\n", | |
| "mask = np.zeros((500, 425), dtype=np.float, (0, 0, 125, 5))\n" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "error", | |
| "ename": "SyntaxError", | |
| "evalue": "non-keyword arg after keyword arg (<ipython-input-63-9d67b145c48d>, line 9)", | |
| "traceback": [ | |
| "\u001b[0;36m File \u001b[0;32m\"<ipython-input-63-9d67b145c48d>\"\u001b[0;36m, line \u001b[0;32m9\u001b[0m\n\u001b[0;31m mask = np.zeros((500, 425),dtype=np.float, (0, 0, 125, 5))\u001b[0m\n\u001b[0;31mSyntaxError\u001b[0m\u001b[0;31m:\u001b[0m non-keyword arg after keyword arg\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 63, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "img_file = 'images/lena-a.png'\n", | |
| "img = cv2.imread(img_file, cv2.IMREAD_COLOR) # rgb\n", | |
| "alpha_img = cv2.imread(img_file, cv2.IMREAD_UNCHANGED) # rgba\n", | |
| "gray_img = cv2.imread(img_file, cv2.IMREAD_GRAYSCALE) # grayscale\n", | |
| "\n", | |
| "print type(img)\n", | |
| "print 'RGB shape: ', img.shape # Rows, cols, channels\n", | |
| "print 'ARGB shape:', alpha_img.shape\n", | |
| "print 'Gray shape:', gray_img.shape\n", | |
| "print 'img.dtype: ', img.dtype\n", | |
| "print 'img.size: ', img.size\n" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "<type 'numpy.ndarray'>\n", | |
| "RGB shape: (512, 512, 3)\n", | |
| "ARGB shape: (512, 512, 4)\n", | |
| "Gray shape: (512, 512)\n", | |
| "img.dtype: uint8\n", | |
| "img.size: 786432\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 68, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "source": [ | |
| "### Using a Mask to tint an image" | |
| ], | |
| "metadata": {} | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# Using a Mask to tint an image\n", | |
| "import numpy as np\n", | |
| "import cv2\n", | |
| "\n", | |
| "i = cv2.imread('images/lena.png')\n", | |
| "#convert to floating point\n", | |
| "img = np.array(i, dtype=np.float)\n", | |
| "img /= 255.0\n", | |
| "cv2.imshow('img',img)\n", | |
| "cv2.waitKey(0)\n", | |
| "\n", | |
| "j = cv2.imread('images/lena-mask.png')\n", | |
| "#convert to floating point\n", | |
| "mask = np.array(j, dtype=np.float)\n", | |
| "mask /= 255.0\n", | |
| "#set transparency to 25%\n", | |
| "transparency = .25\n", | |
| "mask*=transparency\n", | |
| "cv2.imshow('img',mask)\n", | |
| "cv2.waitKey(0)\n", | |
| "\n", | |
| "#make a green overlay\n", | |
| "green = np.ones(img.shape, dtype=np.float)*(0,1,0)\n", | |
| "\n", | |
| "#green over original image\n", | |
| "out = green*mask + img*(1.0-mask)\n", | |
| "cv2.imshow('img',out)\n", | |
| "cv2.waitKey(0)\n", | |
| "\ncv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": 66, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from skimage import data, color, io, img_as_float\n", | |
| "import numpy as np\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "from PIL import Image \n", | |
| "import cv2\n", | |
| "alpha = 0.6\n", | |
| "img1 = Image.new(\"RGBA\", (675, 640), color=(0, 0, 125, 5))\n", | |
| "#cv2.imwrite(\"images/img1-img1.png\", img1) \n", | |
| "#img = cv2.imread(\"images/img1-img1.png\")\n", | |
| "shape = img1.shape\n", | |
| "color_mask = np.zeros((shape))\n", | |
| "\n", | |
| "# Construct a colour image to superimpose\n", | |
| "color_mask = np.zeros((shape))\n", | |
| "color_mask[30:140, 30:140] = [1, 0, 0] # Red block\n", | |
| "color_mask[170:270, 40:120] = [0, 1, 0] # Green block\n", | |
| "color_mask[200:350, 200:350] = [0, 0, 1] # Blue block\n", | |
| "\n", | |
| "# Construct RGB version of grey-level image\n", | |
| "img_color = np.dstack((img, img, img))\n", | |
| "\n", | |
| "# Convert the input image and color mask to Hue Saturation Value (HSV)\n", | |
| "# colorspace\n", | |
| "img_hsv = color.rgb2hsv(img_color)\n", | |
| "color_mask_hsv = color.rgb2hsv(color_mask)\n", | |
| "\n", | |
| "# Replace the hue and saturation of the original image\n", | |
| "\n" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "error", | |
| "ename": "AttributeError", | |
| "evalue": "'Image' object has no attribute 'shape'", | |
| "traceback": [ | |
| "\u001b[0;31m\u001b[0m", | |
| "\u001b[0;31mAttributeError\u001b[0mTraceback (most recent call last)", | |
| "\u001b[0;32m<ipython-input-58-7104bb83a434>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 8\u001b[0m \u001b[0;31m#cv2.imwrite(\"images/img1-img1.png\", img1)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0;31m#img = cv2.imread(\"images/img1-img1.png\")\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 10\u001b[0;31m \u001b[0mshape\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mimg1\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 11\u001b[0m \u001b[0mcolor_mask\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mzeros\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mshape\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 12\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;31mAttributeError\u001b[0m: 'Image' object has no attribute 'shape'" | |
| ] | |
| } | |
| ], | |
| "execution_count": 58, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "\n", | |
| "from skimage import data, color, io, img_as_float\n", | |
| "import numpy as np\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "\n", | |
| "alpha = 0.6\n", | |
| "img = cv2.imread(\"images/hicks-mask.png\")\n", | |
| "shape = img.shape\n", | |
| "\n", | |
| "# Construct a colour image to superimpose\n", | |
| "color_mask = np.zeros((shape))\n", | |
| "color_mask[30:140, 30:140] = [1, 0, 0] # Red block\n", | |
| "color_mask[170:270, 40:120] = [0, 1, 0] # Green block\n", | |
| "color_mask[200:350, 200:350] = [0, 0, 1] # Blue block\n", | |
| "\n", | |
| "# Construct RGB version of grey-level image\n", | |
| "img_color = np.dstack((img, img, img))\n", | |
| "\n", | |
| "# Convert the input image and color mask to Hue Saturation Value (HSV)\n", | |
| "# colorspace\n", | |
| "img_hsv = color.rgb2hsv(img_color)\n", | |
| "color_mask_hsv = color.rgb2hsv(color_mask)\n", | |
| "\n", | |
| "# Replace the hue and saturation of the original image\n", | |
| "# with that of the color mask\n", | |
| "img_hsv[..., 0] = color_mask_hsv[..., 0]\n", | |
| "img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha\n", | |
| "\n", | |
| "img_masked = color.hsv2rgb(img_hsv)\n", | |
| "\n", | |
| "# Display the output\n", | |
| "f, (ax0, ax1, ax2) = plt.subplots(1, 3,\n", | |
| " subplot_kw={'xticks': [], 'yticks': []})\n", | |
| "ax0.imshow(img, cmap=plt.cm.gray)\n", | |
| "ax1.imshow(color_mask)\n", | |
| "ax2.imshow(img_masked)\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "error", | |
| "ename": "ValueError", | |
| "evalue": "the input array must be have a shape == (.., ..,[ ..,] 3)), got (640, 675, 9)", | |
| "traceback": [ | |
| "\u001b[0;31m\u001b[0m", | |
| "\u001b[0;31mValueError\u001b[0mTraceback (most recent call last)", | |
| "\u001b[0;32m<ipython-input-39-3426c8ebcc40>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 19\u001b[0m \u001b[0;31m# Convert the input image and color mask to Hue Saturation Value (HSV)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0;31m# colorspace\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 21\u001b[0;31m \u001b[0mimg_hsv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcolor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrgb2hsv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg_color\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 22\u001b[0m \u001b[0mcolor_mask_hsv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcolor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrgb2hsv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mcolor_mask\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 23\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;32m/home/jack/anaconda2/lib/python2.7/site-packages/skimage/color/colorconv.pyc\u001b[0m in \u001b[0;36mrgb2hsv\u001b[0;34m(rgb)\u001b[0m\n\u001b[1;32m 256\u001b[0m \u001b[0;34m>>\u001b[0m\u001b[0;34m>\u001b[0m \u001b[0mimg_hsv\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mcolor\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mrgb2hsv\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mimg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 257\u001b[0m \"\"\"\n\u001b[0;32m--> 258\u001b[0;31m \u001b[0marr\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0m_prepare_colorarray\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrgb\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 259\u001b[0m \u001b[0mout\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mempty_like\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 260\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;32m/home/jack/anaconda2/lib/python2.7/site-packages/skimage/color/colorconv.pyc\u001b[0m in \u001b[0;36m_prepare_colorarray\u001b[0;34m(arr)\u001b[0m\n\u001b[1;32m 153\u001b[0m msg = (\"the input array must be have a shape == (.., ..,[ ..,] 3)), \" +\n\u001b[1;32m 154\u001b[0m \"got (\" + (\", \".join(map(str, arr.shape))) + \")\")\n\u001b[0;32m--> 155\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mValueError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmsg\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 156\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 157\u001b[0m \u001b[0;32mreturn\u001b[0m \u001b[0mdtype\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimg_as_float\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0marr\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;31mValueError\u001b[0m: the input array must be have a shape == (.., ..,[ ..,] 3)), got (640, 675, 9)" | |
| ] | |
| } | |
| ], | |
| "execution_count": 39, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from PIL import Image, ImageFont, ImageDraw\n", | |
| "from PIL import ImageEnhance\n", | |
| "import cv2\n", | |
| "\n", | |
| "img1 = Image.new(\"RGBA\", (675, 640), color=(0, 0, 125, 5))\n", | |
| "dr1 = ImageDraw.Draw(img1)\n", | |
| "fnt = ImageFont.truetype(\"/home/jack/.fonts/LuckiestGuy.ttf\",35)\n", | |
| "dr1.text((230, 15), \"Three Images Combined\", font=fnt, fill=(255, 255, 0, 128))\n", | |
| "img1.save('images/test_out.png')\n", | |
| "\n", | |
| "r1 = cv2.imread(\"images/S-hicks01.jpg\")\n", | |
| "r2 = cv2.imread(\"images/test_out.png\")\n", | |
| "r3 = cv2.imread(\"images/waves.jpg\")\n", | |
| "r4 = cv2.imread(\"images/S-hicks02.jpg\")\n", | |
| "\n", | |
| "r1 = r * 1.5\n", | |
| "r2 = r2 *.2\n", | |
| "r3 = r3 *.3\n", | |
| "r4 = r4 \n", | |
| "\n", | |
| "to = r1+r2+r3+r4 \n", | |
| "ave = (to/4)-45\n", | |
| "\n", | |
| "#image = Image.fromarray(ave.astype('uint8'), 'RGB')\n", | |
| "image = Image.fromarray(ave.astype('uint8'), 'RGB')\n", | |
| "\n", | |
| "def change_contrast(level):\n", | |
| " factor = (259 * (level + 255)) / (255 * (259 - level))\n", | |
| " def contrast(c):\n", | |
| " return 128 + factor * (c - 150)\n", | |
| " return image.point(contrast)\n", | |
| "\n", | |
| "nimC = change_contrast(180)\n", | |
| "nimC.save(\"images/hicks-test3.png\")\n", | |
| "!showme images/hicks-test3.png\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# import cv2\n", | |
| "import numpy as np\n", | |
| "from PIL import Image\n", | |
| "#cap = Image.open('images/face_300.jpg')\n", | |
| "cap = cv2.imread('images/face_300.jpg')\n", | |
| "#cap = cv2.imread('images/test-color.png')\n", | |
| "while(1):\n", | |
| " # Take each frame\n", | |
| " frame = cap\n", | |
| " # Convert BGR to HSV\n", | |
| " hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n", | |
| " #colormask = cv2.cvtColor(frame, cv2.COLOR_RGB2HSV)\n", | |
| " #hsv Hue Saturation Value\n", | |
| " lower_red = np.array([200,250,200])\n", | |
| " upper_red = np.array([255,255,255])\n", | |
| " \n", | |
| " # Threshold the HSV image to get only blue colors\n", | |
| " mask = cv2.inRange(hsv, lower_red, upper_red)\n", | |
| "\n", | |
| " # Bitwise-AND mask and original image\n", | |
| " res = cv2.bitwise_and(frame, frame, mask= mask)\n", | |
| "\n", | |
| " cv2.imshow('frame',frame)\n", | |
| " cv2.imshow('mask',mask)\n", | |
| " #cv2.imshow('colormask',colormask)\n", | |
| " k = cv2.waitKey(5) & 0xFF\n", | |
| " if k == 27:\n", | |
| " break\n", | |
| "\n", | |
| "cv2.destroyAllWindows()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# import cv2\n", | |
| "import numpy as np\n", | |
| "from PIL import Image\n", | |
| "#cap = Image.open('images/face_300.jpg')\n", | |
| "#cap = cv2.imread('images/face.jpg')\n", | |
| "cap = cv2.imread('images/test-color.png')\n", | |
| "while(1):\n", | |
| " # Take each frame\n", | |
| " frame = cap\n", | |
| " # Convert BGR to HSV\n", | |
| " hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n", | |
| " \n", | |
| " #hue 0-85 85-170 170-255\n", | |
| " \n", | |
| " upper_blue = np.array([255,255,255])\n", | |
| " lower_blue = np.array([100,50,50])\n", | |
| " \n", | |
| " upper_green= np.array([100,255,255])\n", | |
| " lower_green = np.array([40,50,50])\n", | |
| " \n", | |
| " upper_red = np.array([40,255,255])\n", | |
| " lower_red = np.array([0,50,50])\n", | |
| " \n", | |
| " # Threshold the HSV image to get only blue colors\n", | |
| " mask = cv2.inRange(hsv, lower_green, upper_green)\n", | |
| " mask2 = cv2.inRange(hsv, lower_blue, upper_blue)\n", | |
| " mask3 = cv2.inRange(hsv, lower_red, upper_red)\n", | |
| "\n", | |
| " # Bitwise-AND mask and original image\n", | |
| " res = cv2.bitwise_and(frame,frame, mask= mask)\n", | |
| "\n", | |
| " #cv2.imshow('frame',frame)\n", | |
| " cv2.imshow('mask',mask)\n", | |
| " cv2.imshow('mask2',mask2)\n", | |
| " cv2.imshow('mask3',mask3)\n", | |
| " #cv2.imshow('hsv',hsv)\n", | |
| " #cv2.imshow('res',res)\n", | |
| " \n", | |
| " k = cv2.waitKey(5) & 0xFF\n", | |
| " if k == 27:\n", | |
| " break\n", | |
| "\n", | |
| "cv2.destroyAllWindows()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "flags = [i for i in dir(cv2) if i.startswith('COLOR_')]\n", | |
| "print flags" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "img = cv2.imread(\"images/face.jpg\")\n", | |
| "\n", | |
| "color_mask = np.zeros((rows, cols, 3))\n", | |
| "color_mask[30:140, 30:140] = [1, 0, 0] # Red block\n", | |
| "color_mask[170:270, 40:120] = [0, 1, 0] # Green block\n", | |
| "color_mask[200:350, 200:350] = [0, 0, 1] # Blue block\n", | |
| "\n", | |
| "# Construct RGB version of grey-level image\n", | |
| "img_color = np.dstack((img, img, img))\n", | |
| "\n", | |
| "# Convert the input image and color mask to Hue Saturation Value (HSV)\n", | |
| "# colorspace\n", | |
| "img_hsv = color.rgb2hsv(img_color)\n", | |
| "color_mask_hsv = color.rgb2hsv(color_mask)\n", | |
| "\n", | |
| "NC = cv2.cvtColor(col, cv2.COLOR_BAYER_BG2BGR)\n", | |
| "\ncv2.imshow(NC)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from PIL import Image\n", | |
| "from PIL import ImageFont\n", | |
| "from PIL import ImageDraw\n", | |
| "from PIL import ImageEnhance\n", | |
| "import cv2\n", | |
| "\n", | |
| "fnt = ImageFont.truetype(\"/home/jack/.fonts/LuckiestGuy.ttf\",35)\n", | |
| "img1 = Image.new(\"RGBA\", 100, 100, color=(0, 0, 0, 230)) #RGBA\n", | |
| "dr1 = ImageDraw.Draw(img1)\n", | |
| "dr1.text((5, 5), \"some text\", font=fnt)\n", | |
| "\n", | |
| "# my source image\n", | |
| "my_img.paste(dr1, (10, 10))\n", | |
| "my_img.save(\"images/out_file.png\", \"PNG\")" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "img=Image.new(\"RGBA\", (675,640),(0,0,100))\n", | |
| "img" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from PIL import Image, ImageFont, ImageDraw\n", | |
| "img1 = Image.new(\"RGBA\", (675, 640), color=(0, 0, 0, 0))\n", | |
| "dr1 = ImageDraw.Draw(img1)\n", | |
| "fnt = ImageFont.truetype(\"/home/jack/.fonts/LuckiestGuy.ttf\",35)\n", | |
| "dr1.text((330, 15), \"some text\", font=fnt, fill=(255, 255, 0, 128))\n", | |
| "#img1.show()\n", | |
| "img1.save('images/test_out.png')\n", | |
| "img1" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "#img = Image.open(\"images/hicks-mask.png\")\n", | |
| "#img = cv2.imread(\"images/hicks-mask.png\")\n", | |
| "from PIL import Image\n", | |
| "from PIL import ImageDraw\n", | |
| "\n", | |
| "im = Image.new(\"P\", (400, 400), 0)\n", | |
| "\n", | |
| "im.putpalette([\n", | |
| " 0, 0, 0, # black background\n", | |
| " 255, 0, 0, # index 1 is red\n", | |
| " 255, 255, 0, # index 2 is yellow\n", | |
| " 255, 153, 0, # index 3 is orange\n", | |
| "])\n", | |
| "\n", | |
| "d = ImageDraw.ImageDraw(im)\n", | |
| "d.setfill(1)\n", | |
| "\n", | |
| "d.setink(1)\n", | |
| "d.polygon((0, 0, 0, 400, 400, 400))\n", | |
| "\n", | |
| "d.setink(3)\n", | |
| "d.rectangle((100, 100, 300, 300))\n", | |
| "\n", | |
| "d.setink(2)\n", | |
| "d.ellipse((120, 120, 280, 280))\n", | |
| "\n", | |
| "im.save(\"images/pallet-out.png\")\n", | |
| "!showme images/pallet-out.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# import cv2\n", | |
| "import numpy as np\n", | |
| "from PIL import Image\n", | |
| "#cap = Image.open('images/face_300.jpg')\n", | |
| "cap = cv2.imread('images/face_300.jpg')\n", | |
| "#cap = cv2.imread('images/test-color.png')\n", | |
| "while(1):\n", | |
| " # Take each frame\n", | |
| " frame = cap\n", | |
| " # Convert BGR to HSV\n", | |
| " hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)\n", | |
| " \n", | |
| " lower_red = np.array([0,0,200])\n", | |
| " upper_red = np.array([255,255,255])\n", | |
| " \n", | |
| " # Threshold the HSV image to get only blue colors\n", | |
| " mask = cv2.inRange(hsv, lower_red, upper_red)\n", | |
| "\n", | |
| " # Bitwise-AND mask and original image\n", | |
| " res = cv2.bitwise_and(frame,frame, mask= mask)\n", | |
| "\n", | |
| " cv2.imshow('frame',frame)\n", | |
| " cv2.imshow('mask',mask)\n", | |
| " cv2.imshow('hsv',hsv)\n", | |
| " k = cv2.waitKey(5) & 0xFF\n", | |
| " if k == 27:\n", | |
| " break\n", | |
| "\ncv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "\n", | |
| "from skimage import data, color, io, img_as_float\n", | |
| "import numpy as np\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "\n", | |
| "alpha = 0.6\n", | |
| "#img = Image.open(\"images/hicks-mask.png\")\n", | |
| "img = cv2.imread(\"images/hicks-mask.png\")\n", | |
| "\n", | |
| "#img = img_as_float(data.camera())\n", | |
| "#rows, cols = img.shape\n", | |
| "\n", | |
| "# Construct a colour image to superimpose\n", | |
| "color_mask = np.zeros((rows, cols, 3))\n", | |
| "color_mask[30:140, 30:140] = [1, 0, 0] # Red block\n", | |
| "color_mask[170:270, 40:120] = [0, 1, 0] # Green block\n", | |
| "color_mask[200:350, 200:350] = [0, 0, 1] # Blue block\n", | |
| "\n", | |
| "# Construct RGB version of grey-level image\n", | |
| "img_color = np.dstack((img, img, img))\n", | |
| "\n", | |
| "# Convert the input image and color mask to Hue Saturation Value (HSV)\n", | |
| "# colorspace\n", | |
| "img_hsv = color.rgb2hsv(img_color)\n", | |
| "color_mask_hsv = color.rgb2hsv(color_mask)\n", | |
| "\n", | |
| "# Replace the hue and saturation of the original image\n", | |
| "# with that of the color mask\n", | |
| "img_hsv[..., 0] = color_mask_hsv[..., 0]\n", | |
| "img_hsv[..., 1] = color_mask_hsv[..., 1] * alpha\n", | |
| "\n", | |
| "img_masked = color.hsv2rgb(img_hsv)\n", | |
| "\n", | |
| "# Display the output\n", | |
| "f, (ax0, ax1, ax2) = plt.subplots(1, 3,\n", | |
| " subplot_kw={'xticks': [], 'yticks': []})\n", | |
| "ax0.imshow(img, cmap=plt.cm.gray)\n", | |
| "ax1.imshow(color_mask)\n", | |
| "ax2.imshow(img_masked)\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "r = cv2.imread(\"images/S-hicks01.jpg\")\n", | |
| "r2 = cv2.imread(\"images/S-hicks02.jpg\")\n", | |
| "r3 = cv2.imread(\"images/waves.jpg\")\n", | |
| "\n", | |
| "r = r *.8\n", | |
| "r2 = r2*.2\n", | |
| "\n", | |
| "rr2 = ((r + r2) +r3)-200\n", | |
| "sqr4 = (rr2/3)\n", | |
| "\n", | |
| "#im = Image.fromarray(np.uint8(cm.gist_earth(myarray)*255))\n", | |
| "#im = Image.fromarray(np.uint8(d*255)\n", | |
| "#cv2.imwrite(\"images/hicks-water.png\", rr2)\n", | |
| "cv2.imwrite(\"images/hicks-water1.png\", sqr4)\n", | |
| "!showme images/hicks-water1.png\n", | |
| "\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from sklearn.preprocessing import normalize\n", | |
| "r = cv2.imread(\"images/S-hicks01.jpg\")\n", | |
| "r2 = cv2.imread(\"images/S-hicks02.jpg\")\n", | |
| "r3 = cv2.imread(\"images/waves.jpg\")\n", | |
| "\n", | |
| "#rr2 = r+r2+r3\n", | |
| "rr2 = (r + r2)\n", | |
| "\n", | |
| "#This works as a blend it can be applied to either image in different degrees\n", | |
| "#r2 = r2*.4\n", | |
| "\n", | |
| "#rr2 = (r + r2)\n", | |
| "#sqr4 = (rr2/3)+25\n", | |
| "\n", | |
| "#cv2.imwrite(\"images/hicks-wave.png\", sqr4)\n", | |
| "cv2.imwrite(\"images/hicks-wave.png\", rr2)\n", | |
| "!showme images/hicks-wav.png\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!ls images" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from sklearn.preprocessing import normalize\n", | |
| "r = cv2.imread(\"images/S-hicks01.jpg\")\n", | |
| "r2 = cv2.imread(\"images/S-hicks02.jpg\")\n", | |
| "r3 = cv2.imread(\"images/waves.jpg\")\n", | |
| "\n\n", | |
| "rr2 = r3 + (r + r2)\n", | |
| "\n", | |
| "norm1 = rr2 / np.linalg.norm(rr2)\n", | |
| "norm2 = normalize(rr2[:,np.newaxis], axis=0).ravel()\n", | |
| "print np.all(norm1 == norm2)\n", | |
| "# True" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "import numpy as np\n", | |
| "r = cv2.imread(\"images/face.jpg\")\n", | |
| "r2 = cv2.imread(\"images/face-i.jpg\")\n", | |
| "\n", | |
| "rh = r + r2\n", | |
| "#im = Image.fromarray(np.uint8(cm.gist_earth(myarray)*255))\n", | |
| "#im = Image.fromarray(np.uint8(d*255)\n", | |
| "cv2.imwrite(\"images/numpxhr2.png\", rh)\n", | |
| "!showme images/numpxhr2.png\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "list(h2s.getdata())" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "import numpy as np\n", | |
| "r = cv2.imread(\"images/face.jpg\")\n", | |
| "r2 = cv2.imread(\"images/face-i.jpg\")\n", | |
| "\n", | |
| "rh = r + r2\n", | |
| "#im = Image.fromarray(np.uint8(cm.gist_earth(myarray)*255))\n", | |
| "#im = Image.fromarray(np.uint8(d*255)\n", | |
| "cv2.imwrite(\"images/numpxhr2.png\", rh)\n", | |
| "!showme images/numpxhr2.png\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "def my_func(a):\n", | |
| " \"\"\"Average first and last element of a 1-D array\"\"\"\n", | |
| " return (a[0] + a[-1]) * 0.5\n", | |
| "b = cv2.imread('images/face_300.jpg')\n", | |
| "nm = np.apply_along_axis(my_func, 0, b)\n", | |
| "#array([ 4., 5., 6.])\n", | |
| "#np.apply_along_axis(my_func, 1, b)\n", | |
| "#array([ 2., 5., 8.])\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "d = np.load('numpy-filters/cv2LUT_HSV.npy')\n", | |
| "#image = d # your source data\n", | |
| "#image = cv2.imread('images/face_300.jpg')\n", | |
| "\n\n", | |
| "#image = image.astype(np.float32) # convert to float\n", | |
| "#image -= image.min(2,2,0) # ensure the minimal value is 0.0\n", | |
| "#image /= image.max() # maximum valu\n", | |
| "cv2.imwrite(\"images/NPimage.png\", d)\n", | |
| "!showme images/NPimage.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "#d = np.load('numpy-filters/cv2LUT_HSV.npy')\n", | |
| "#image = d # your source data\n", | |
| "image = cv2.imread('images/face_300.jpg')\n", | |
| "\n\n", | |
| "image = image.astype(np.float32) # convert to float\n", | |
| "image -= image.min() # ensure the minimal value is 0.0\n", | |
| "image /= image.max() # scale so the maximum value is 1.0\n", | |
| "cv2.imwrite(\"images/NPimage.png\", image * 255)\n", | |
| "!showme images/NPimage.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "%matplotlib inline\n", | |
| "im = np.array([[2, 3, 2], [3, 4, 1], [6, 1, 5]])\n", | |
| "mask = np.array([[False, False, True], [False, True, True], [False, False, False]])\n", | |
| "\n", | |
| "# note that the mask is inverted (~) to show color where mask equals true\n", | |
| "im_ma = np.ma.array(im, mask=~mask)\n", | |
| "\n", | |
| "# some default keywords for imshow\n", | |
| "kwargs = {'interpolation': 'none', 'vmin': im.min(), 'vmax': im.max()}\n", | |
| "\n", | |
| "fig, ax = plt.subplots(1,3, figsize=(10,5), subplot_kw={'xticks': [], 'yticks': []})\n", | |
| "\n", | |
| "ax[0].set_title('\"Original\" data')\n", | |
| "ax[0].imshow(im, cmap=plt.cm.Greys_r, **kwargs)\n", | |
| "\n", | |
| "ax[1].set_title('Mask')\n", | |
| "ax[1].imshow(mask, cmap=plt.cm.binary, interpolation='none')\n", | |
| "\n", | |
| "ax[2].set_title('Masked data in color (jet)')\n", | |
| "ax[2].imshow(im, cmap=plt.cm.Greys_r, **kwargs)\n", | |
| "ax[2].imshow(im_ma, cmap=plt.cm.jet, **kwargs)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import glob\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "X_data = []\n", | |
| "files = glob.glob (\"images/*.png\")\n", | |
| "for myFile in files:\n", | |
| " print(myFile)\n", | |
| " image = cv2.imread (myFile)\n", | |
| " X_data.append (image)\n", | |
| "\nnp.savez('images/bigblob', X_data)" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "images/hicks-water3.png\n", | |
| "images/numpxh.png\n", | |
| "images/hicks-sqr4.png\n", | |
| "images/mask.png\n", | |
| "images/hicks-sqr.png\n", | |
| "images/test-color.png\n", | |
| "images/hicks3.png\n", | |
| "images/newim.png\n", | |
| "images/hicks-lev.png\n", | |
| "images/hicks-mask3.png\n", | |
| "images/hicks3a.png\n", | |
| "images/test_out.png\n", | |
| "images/hicks-sqr3.png\n", | |
| "images/numpxhr2.png\n", | |
| "images/mask-i.png\n", | |
| "images/hicks-test3.png\n", | |
| "images/hicks-water1.png\n", | |
| "images/hicks-post.png\n", | |
| "images/hicks-post3.png\n", | |
| "images/numpxr2.png\n", | |
| "images/face.png\n", | |
| "images/face-i.png\n", | |
| "images/NPimage.png\n", | |
| "images/hicks-mask5.png\n", | |
| "images/hicks-mask.png\n", | |
| "images/hicks-post2.png\n", | |
| "images/hicks-test2.png\n", | |
| "images/hicks-wave.png\n", | |
| "images/hicks-test.png\n", | |
| "images/numpx2.png\n", | |
| "images/my.png\n", | |
| "images/face_HSV.png\n", | |
| "images/hicks-water.png\n", | |
| "images/hicks-post-water1.png\n", | |
| "images/hicks-mask2.png\n", | |
| "images/hicks-mask4.png\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 1, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# 'ndimage.convolve' function from scipy. ndimage provides a \"N\" Dimensional convolution.\n", | |
| "#If you want convolutions to work, both the image and the kernel must have the same number of dimensions.\n", | |
| "#Kernel (4,4,7) cannot be convolved with an image (130,130). A singleton dimension must be added\n", | |
| "#before convolution. It may be removed afterwards with squeeze.\n", | |
| "\n", | |
| "img = np.zeros(shape=(130,130),dtype=np.float32)\n", | |
| "img = img[:,:,None] # Add singleton dimension\n", | |
| "res = convolve(img,kernel)\n", | |
| "finalOutput = res.squeeze() # Remove singleton" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import cv2\n", | |
| "\n", | |
| "im = cv2.imread('spies.jpg')\n", | |
| "#im = np.ones((20, 20)) * np.arange(20)\n", | |
| "#im = im[:,:,None] # Add singleton dimension\n", | |
| "footprint = np.array([[-4,0,-1,0,-4],\n", | |
| " [0,0,-1,0,0],\n", | |
| " [-1,-1,32,-1,-1],\n", | |
| " [0,0,-1,0,0],\n", | |
| " [-4,0,-1,0,-4]])\n", | |
| "footprint = footprint[:,:,None] # Add singleton dimension\n", | |
| "def test(x):\n", | |
| " return (x*0.5).sum()\n", | |
| "\n", | |
| "res = ndimage.generic_filter(im, test, footprint=footprint)\n", | |
| "finalOutput = res.squeeze()\n", | |
| "cv2.imwrite(\"footprint5a.png\", res)\n", | |
| "cv2.imwrite(\"footprint5.png\", finalOutput)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import cv2\n", | |
| "\n", | |
| "im = cv2.imread('spies.jpg')\n", | |
| "#im = np.ones((20, 20)) * np.arange(20)\n", | |
| "#im = im[:,:,None] # Add singleton dimension\n", | |
| "footprint = np.array([[0,1,0],[1,1,1],[0,1,0]])\n", | |
| "footprint = footprint[:,:,None] # Add singleton dimension\n", | |
| "def test(x):\n", | |
| " return (x*0.5).sum()\n", | |
| "\n", | |
| "res = ndimage.generic_filter(im, test, footprint=footprint)\n", | |
| "finalOutput = res.squeeze()\n", | |
| "cv2.imwrite(\"footprint6a.png\", res)\n", | |
| "cv2.imwrite(\"footprint6.png\", finalOutput)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!showme footprint6.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import cv2\n", | |
| "\n", | |
| "img = cv2.imread('spies.jpg')\n", | |
| "blue, green, red = cv2.split(img)\n", | |
| "cv2.imshow('blue', blue)\n", | |
| "cv2.imshow('green', green)\n", | |
| "cv2.imshow('red', red)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| " \n", | |
| "img = cv2.imread('circles.png', 1)\n", | |
| "hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "#in the image:\n", | |
| "#red equals 237, green equals 28, and blue equals 36\n", | |
| "#87 95 16" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!python converter.py 87 95 16" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "python: can't open file 'converter.py': [Errno 2] No such file or directory\r\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 2, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!locate converter.py" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "lower_range = np.array([169, 100, 100], dtype=np.uint8)\n", | |
| "upper_range = np.array([189, 255, 255], dtype=np.uint8)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "#lower_range = np.array([67, 90, 90], dtype=np.uint8)\n", | |
| "#upper_range = np.array([107, 255, 255], dtype=np.uint8)\n", | |
| "lower_range = np.array([90, 67, 90], dtype=np.uint8)\n", | |
| "upper_range = np.array([255, 107, 255], dtype=np.uint8)\n", | |
| " \n", | |
| "\n", | |
| "img = cv2.imread('footprint6.png', 1)\n", | |
| "hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n", | |
| "mask = cv2.inRange(hsv, lower_range, upper_range)\n", | |
| "\n", | |
| "cv2.imshow('mask',mask)\n", | |
| "cv2.imshow('image', img)\n", | |
| "\n", | |
| "cv2.imwrite(\"mask_colora.png\", mask)\n", | |
| "cv2.imwrite(\"image_colora.png\", img)\n", | |
| "\n\n\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "lower_range = np.array([100, 77, 100], dtype=np.uint8)\n", | |
| "upper_range = np.array([255, 97, 255], dtype=np.uint8)\n", | |
| " \n", | |
| "\n", | |
| "img = cv2.imread('spies.jpg', 1)\n", | |
| "hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\n", | |
| "mask = cv2.inRange(hsv, lower_range, upper_range)\n", | |
| "\n", | |
| "cv2.imshow('mask',mask)\n", | |
| "cv2.imshow('image', img)\n", | |
| " \n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "img = cv2.imread('sof.jpg') # load a dummy image\n", | |
| "while(1):\n", | |
| " cv2.imshow('img',img)\n", | |
| " k = cv2.waitKey(33)\n", | |
| " if k==27: # Esc key to stop\n", | |
| " break\n", | |
| " elif k==-1: # normally -1 returned,so don't print it\n", | |
| " continue\n", | |
| " else:\n", | |
| " print k # else print its value" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from datetime import datetime\n", | |
| "\n", | |
| "filename = datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3]\n", | |
| "print filename" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "%%writefile BW_Binary.py\n", | |
| "## use sys, cv2 and numpy packages\n", | |
| "import sys, cv2\n", | |
| "import numpy as np\n", | |
| "two = sys.argv[1]\n", | |
| "print two" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "Writing BW_Binary.py\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 4, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!ls images" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "0003.jpg\t hicks02.jpg\t hicks-test3.png NPimage.png\r\n", | |
| "%03d_junk.jpg\t hicks3a.png\t hicks-test.png numpx2.png\r\n", | |
| "bigblob.npy\t hicks3.png\t\t hicks-water1.png numpxh.png\r\n", | |
| "bigblob.npz\t hicks-lev.png\t hicks-water3.png numpxhr2.png\r\n", | |
| "Crop_woman.jpg\t hicks-mask2.png\t hicks-water.png numpxr2.png\r\n", | |
| "face_300.jpg\t hicks-mask3.png\t hicks-wave.png oldimage.jpg\r\n", | |
| "face_clone.jpg\t hicks-mask4.png\t image_float.jpg S-hicks01.jpg\r\n", | |
| "face_clone-roll.jpg hicks-mask5.png\t junk.jpg\t S-hicks02.jpg\r\n", | |
| "face_clonez.jpg hicks-mask.png\t mars.jpg\t temp.jpg\r\n", | |
| "face_gray.jpg\t hicks-post2.png\t mask-i.jpg\t temp-nose.jpg\r\n", | |
| "face_HSV.png\t hicks-post3.png\t mask-i.png\t test-color.jpg\r\n", | |
| "face-i.jpg\t hicks-post.png\t mask.jpg\t test-color.png\r\n", | |
| "face-i.png\t hicks-post-water1.png mask.png\t test_out.jpg\r\n", | |
| "face.jpg\t hicks-sqr3.png\t my.png\t test_out.png\r\n", | |
| "face_numpy.jpg\t hicks-sqr4.png\t newim.png\t text.jpg\r\n", | |
| "face.png\t hicks-sqr.png\t nose.jpg\t waves.jpg\r\n", | |
| "hicks01.jpg\t hicks-test2.png\t nose-rotate.jpg woman.jpg\r\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 9, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!python BW_Binary.py images/test-color.png" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "20170728011850126.png\r\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 10, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!showme 20170728011850126.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": 11, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!which python\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "#To run file below:<br /> \n", | |
| "#<b><i>python BW_Binary.py pylab-output.png</i></b><br />\n", | |
| "#it will convert an image to black and white with name and date as filename <br /> \n", | |
| "#<b>EXAMPLE: ' 20170723034405025.png '</b>" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "%%writefile BW_Binary.py\n", | |
| "#!/home/jack/anaconda2/bin/python\n", | |
| "# To run file: python BW_Binary.py pylab-output.png\n", | |
| "## use sys, cv2 and numpy packages\n", | |
| "import sys, cv2\n", | |
| "import numpy as np\n", | |
| "from datetime import datetime\n", | |
| "imageName = sys.argv[1]\n", | |
| "#imageName = 'spies.jpg'\n", | |
| "## main function\n", | |
| "## read source image and get its shape\n", | |
| "sourceImage = cv2.imread(imageName, -1) # -1 is used to read the image as is\n", | |
| "imgRows, imgCols, imgChannels = np.shape(sourceImage)\n", | |
| "\n", | |
| "## color to gray scale\n", | |
| "grayImage = cv2.cvtColor(sourceImage, cv2.COLOR_RGB2GRAY) # cv2.COLOR_RGB2GRAY = 7\n", | |
| "grayImgName = 'gray_' + imageName\n", | |
| "\n", | |
| "## gray to binary: threshold = 100 (arbitrary); maxValue = 255; type = cv2.THRESH_BINARY\n", | |
| "flag, binaryImage = cv2.threshold(grayImage, 100, 255, cv2.THRESH_BINARY) # cv2.THRESH_BINARY = 0\n", | |
| "binaryImgName = 'binary_' + imageName\n", | |
| "baseD = datetime.utcnow().strftime('%Y%m%d%H%M%S%f')[:-3]\n", | |
| "filename = baseD+\".png\"\n", | |
| "cv2.imwrite(filename, binaryImage)\n", | |
| "print filename" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "Overwriting BW_Binary.py\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 7, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "%%writefile test.py\n", | |
| "\n", | |
| "import sys, cv2\n", | |
| "import numpy as np\n", | |
| "words = str(sys.argv)\n", | |
| "print words" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!python test.py binaryImgName.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!showme grayImgName.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "%%writefile converter.py\n", | |
| "import sys\n", | |
| "import numpy as np\n", | |
| "import cv2\n", | |
| " \n", | |
| "blue = int(sys.argv[1])\n", | |
| "green = int(sys.argv[2])\n", | |
| "red = int(sys.argv[3])\n", | |
| " \n", | |
| "color = np.uint8([[[blue, green, red]]])\n", | |
| "hsv_color = cv2.cvtColor(color, cv2.COLOR_BGR2HSV)\n", | |
| " \n", | |
| "hue = hsv_color[0][0][0]\n", | |
| " \n", | |
| "print(\"Lower bound is :\"),\n", | |
| "print(\"[\" + str(hue-10) + \", 100, 100]\\n\")\n", | |
| " \n", | |
| "print(\"Upper bound is :\"),\n", | |
| "print(\"[\" + str(hue + 10) + \", 255, 255]\")" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "stream", | |
| "name": "stdout", | |
| "text": [ | |
| "Writing converter.py\n" | |
| ] | |
| } | |
| ], | |
| "execution_count": 12, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import cv2\n", | |
| "\n", | |
| "img = cv2.imread('spies.jpg')\n", | |
| "red = img[:,:,2]\n", | |
| "cv2.imshow('red', red)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import cv2\n", | |
| "\n", | |
| "im = cv2.imread('spies.jpg')\n", | |
| "#im = np.ones((20, 20)) * np.arange(20)\n", | |
| "#im = im[:,:,None] # Add singleton dimension\n", | |
| "footprint = np.array([[0,0,1,0],\n", | |
| " [0,0,0,0],\n", | |
| " [0,0,0,0],\n", | |
| " [1,0,0,0]])\n", | |
| "footprint = footprint[:,:,None] # Add singleton dimension\n", | |
| "def test(x):\n", | |
| " return (x*0.5).sum() \n", | |
| "res = ndimage.generic_filter(im, test, footprint=footprint)\n", | |
| "cv2.imwrite(\"footprint2.png\", res)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!showme footprint2.png" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "import scipy\n", | |
| "from scipy import ndimage\n", | |
| "\n", | |
| "d = np.array([[0, 0, 0, 0],[0, 1, 6, 0],[0, 0, 0, 0]]) #this represents an image array [1, 6], surrounded by zeroes\n", | |
| "d = d.astype(float)\n", | |
| "m = np.array([[1, 1, 1, 1],[1, 0, 0, 1],[1, 1, 1, 1]]) #the ones in the mask have a meaning of 'invalid'\n", | |
| "md = np.ma.masked_array(d, mask = m)\n", | |
| "\n", | |
| "f = np.array([[0, 1, 0],[1, 1, 1],[0, 1, 0]]) #small footprint\n", | |
| "f = f[:,:,None] # Add singleton dimension\n", | |
| "\n", | |
| "def fnc(x):\n", | |
| " return (x*0.5).sum() \n", | |
| "im = cv2.imread('avg.png')\n", | |
| "\n", | |
| "avg = scipy.ndimage.generic_filter(im, fnc, footprint = f, mode = 'constant', cval = 0.0)\n", | |
| "avg1 = scipy.ndimage.generic_filter(avg, fnc, footprint = f, mode = 'constant', cval = 0.0)\n", | |
| "\n", | |
| "kernel = np.ones((5,5),np.float32)/25\n", | |
| "dst = cv2.filter2D(avg1,-1,kernel)\n", | |
| "\n\n", | |
| "avg4 = scipy.ndimage.generic_filter(dst, fnc, footprint = f, mode = 'constant', cval = 0.0)\n", | |
| "#finalOutput = avg.squeeze()\n", | |
| "cv2.imwrite('avg4.png', avg4)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# AVERAGE Blur\n", | |
| "import cv2\n", | |
| "img0 = cv2.imread(\"face.png\")\n", | |
| "blurImg = cv2.blur(img0,(10,10)) #You can change the kernel size as you want\n", | |
| "cv2.imshow('Average',blurImg)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# GAUSSIAN Blur\n", | |
| "import cv2\n", | |
| "img0 = cv2.imread(\"face.png\")\n", | |
| "gausBlur = cv2.GaussianBlur(img0, (5,5),0) #you can change the kernel size\n", | |
| "cv2.imshow('Gaussian', gausBlur)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# MEDIAN Blur\n", | |
| "import cv2\n", | |
| "img0 = cv2.imread(\"face.png\")\n", | |
| "\n", | |
| "medBlur = cv2.medianBlur(img0,5)\n", | |
| "cv2.imshow('Median', medBlur)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# BILATERAL Blur\n", | |
| "import cv2\n", | |
| "img0 = cv2.imread(\"face.png\")\n", | |
| "\n", | |
| "bilFilter = cv2.bilateralFilter(img0,9,75,75)\n", | |
| "cv2.imshow('Bilateral', bilFilter)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2 \n", | |
| "import numpy as np \n", | |
| "\n", | |
| "#reading the image\n", | |
| "img = cv2.imread('face.png') #displaying the frames\n", | |
| " \n", | |
| "gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n", | |
| "cv2.imshow('gray',gray)\n", | |
| "blur = cv2.GaussianBlur(gray,(15,15),0)\n", | |
| "cv2.imshow('blur',blur)\n", | |
| "ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n", | |
| "\n", | |
| "mask, contours, _2= cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n", | |
| "cv2.imshow('mask',mask)\n", | |
| "cv2.imshow('img',img)\n", | |
| "cv2.imshow('thresh1',thresh1)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows() " | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2 \n", | |
| "import numpy as np \n", | |
| "\n", | |
| "#reading the image\n", | |
| "img = cv2.imread('face.png') #displaying the frames\n", | |
| " \n", | |
| "gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n", | |
| "#cv2.imshow('gray',gray)\n", | |
| "blur = cv2.GaussianBlur(gray,(15,15),0)\n", | |
| "#cv2.imshow('blur',blur)\n", | |
| "ret,thresh1 = cv2.threshold(blur,70,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)\n", | |
| "\n", | |
| "mask, contours, _2= cv2.findContours(thresh1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) \n", | |
| "\n", | |
| "cv2.imshow('mask', mask)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows() " | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!wget -O woman.jpg https://upload.wikimedia.org/wikipedia/commons/thumb/0/08/Angry_woman.jpg/1280px-Angry_woman.jpg" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!wget -O detect.jpg https://i.stack.imgur.com/V6562.jpg" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!python face_landmark_detection.py data/shape_predictor_68_face_landmarks.dat imagefolder\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "%%writefile face_landmark_detection.py\n", | |
| "#!/usr/bin/python\n", | |
| "# The contents of this file are in the public domain. See LICENSE_FOR_EXAMPLE_PROGRAMS.txt\n", | |
| "#\n", | |
| "# This example program shows how to find frontal human faces in an image and\n", | |
| "# estimate their pose. The pose takes the form of 68 landmarks. These are\n", | |
| "# points on the face such as the corners of the mouth, along the eyebrows, on\n", | |
| "# the eyes, and so forth.\n", | |
| "#\n", | |
| "# This face detector is made using the classic Histogram of Oriented\n", | |
| "# Gradients (HOG) feature combined with a linear classifier, an image pyramid,\n", | |
| "# and sliding window detection scheme. The pose estimator was created by\n", | |
| "# using dlib's implementation of the paper:\n", | |
| "# One Millisecond Face Alignment with an Ensemble of Regression Trees by\n", | |
| "# Vahid Kazemi and Josephine Sullivan, CVPR 2014\n", | |
| "# and was trained on the iBUG 300-W face landmark dataset.\n", | |
| "#\n", | |
| "# Also, note that you can train your own models using dlib's machine learning\n", | |
| "# tools. See train_shape_predictor.py to see an example.\n", | |
| "#\n", | |
| "# You can get the shape_predictor_68_face_landmarks.dat file from:\n", | |
| "# http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\n", | |
| "#\n", | |
| "# COMPILING/INSTALLING THE DLIB PYTHON INTERFACE\n", | |
| "# You can install dlib using the command:\n", | |
| "# pip install dlib\n", | |
| "#\n", | |
| "# Alternatively, if you want to compile dlib yourself then go into the dlib\n", | |
| "# root folder and run:\n", | |
| "# python setup.py install\n", | |
| "# or\n", | |
| "# python setup.py install --yes USE_AVX_INSTRUCTIONS\n", | |
| "# if you have a CPU that supports AVX instructions, since this makes some\n", | |
| "# things run faster. \n", | |
| "#\n", | |
| "# Compiling dlib should work on any operating system so long as you have\n", | |
| "# CMake and boost-python installed. On Ubuntu, this can be done easily by\n", | |
| "# running the command:\n", | |
| "# sudo apt-get install libboost-python-dev cmake\n", | |
| "#\n", | |
| "# Also note that this example requires scikit-image which can be installed\n", | |
| "# via the command:\n", | |
| "# pip install scikit-image\n", | |
| "# Or downloaded from http://scikit-image.org/download.html. \n", | |
| "\n", | |
| "import sys\n", | |
| "import os\n", | |
| "import dlib\n", | |
| "import glob\n", | |
| "from skimage import io\n", | |
| "import cv2\n", | |
| "if len(sys.argv) != 3:\n", | |
| " print(\n", | |
| " \"Give the path to the trained shape predictor model as the first \"\n", | |
| " \"argument and then the directory containing the facial images.\\n\"\n", | |
| " \"For example, if you are in the python_examples folder then \"\n", | |
| " \"execute this program by running:\\n\"\n", | |
| " \" ./face_landmark_detection.py shape_predictor_68_face_landmarks.dat ../examples/faces\\n\"\n", | |
| " \"You can download a trained facial shape predictor from:\\n\"\n", | |
| " \" http://dlib.net/files/shape_predictor_68_face_landmarks.dat.bz2\")\n", | |
| " exit()\n", | |
| "\n", | |
| "predictor_path = sys.argv[1]\n", | |
| "faces_folder_path = sys.argv[2]\n", | |
| "\n", | |
| "detector = dlib.get_frontal_face_detector()\n", | |
| "predictor = dlib.shape_predictor(predictor_path)\n", | |
| "win = dlib.image_window()\n", | |
| "\n", | |
| "for f in glob.glob(os.path.join(faces_folder_path, \"*.jpg\")):\n", | |
| " print(\"Processing file: {}\".format(f))\n", | |
| " img = io.imread(f)\n", | |
| "\n", | |
| " win.clear_overlay()\n", | |
| " win.set_image(img)\n", | |
| "\n", | |
| " # Ask the detector to find the bounding boxes of each face. The 1 in the\n", | |
| " # second argument indicates that we should upsample the image 1 time. This\n", | |
| " # will make everything bigger and allow us to detect more faces.\n", | |
| " dets = detector(img, 1)\n", | |
| " print(\"Number of faces detected: {}\".format(len(dets)))\n", | |
| " for k, d in enumerate(dets):\n", | |
| " print(\"Detection {}: Left: {} Top: {} Right: {} Bottom: {}\".format(\n", | |
| " k, d.left(), d.top(), d.right(), d.bottom()))\n", | |
| " # Get the landmarks/parts for the face in box d.\n", | |
| " shape = predictor(img, d)\n", | |
| " print(\"Part 0: {}, Part 1: {} ...\".format(shape.part(0),\n", | |
| " shape.part(1)))\n", | |
| " # Draw the face landmarks on the screen.\n", | |
| " \n", | |
| " cv2.imshow('mask', shape)\n", | |
| " cv2.waitKey(0)\n", | |
| " cv2.destroyAllWindows() " | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "import blender\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "img1 = cv2.imread(\"avg.png\")\n", | |
| "\n", | |
| "kernel = np.load('sharpen_filter.npy')\n", | |
| "sharpen = cv2.filter2D(img1,-1,kernel) \n", | |
| "\n\n", | |
| "cv2.imshow('sharpen',sharpen)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!wget -O lines.jpg http://gdj.graphicdesignjunction.com/wp-content/uploads/2010/06/vector-graphic/vector-graphic-design-12.jpg" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "kernel = np.array([ [0,0,0,0,0],\n", | |
| " [0,0,-.5,0,0],\n", | |
| " [0,-.5,3,-.5,0],\n", | |
| " [0,0,-.5,0,0],\n", | |
| " [0,0,0,0,0] ],np.float32)\n", | |
| "\n", | |
| "np.save('sharpen_filter.npy', kernel) # .npy extension is added if not given\n", | |
| "d = np.load('sharpen_filter.npy')\n", | |
| "kernel == d" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "kernel = np.array([ [-1,0,2,0,-1],\n", | |
| " [-1,0,2,0,-1],\n", | |
| " [-1,0,2,0,-1],\n", | |
| " [-1,0,2,0,-1],\n", | |
| " [-1,0,2,0,-1] ],np.float32)\n", | |
| "\n", | |
| "np.save('numpy-filters/edge_filter.npy', kernel) # .npy extension is added if not given\n", | |
| "d = np.load('numpy-filters/edge_filter.npy')\n", | |
| "kernel == d" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "!wget -O spies.jpg https://i1.wp.com/theheavensdeclare.net/wp-content/uploads/2012/08/Tissot_The_Harlot_of_Jericho_and_the_Two_Spies-in-Wikipedia-public-domain.jpg?ssl=1" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "source": [ | |
| "import cv2\n", | |
| "help(cv2)" | |
| ], | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "markdown", | |
| "source": [ | |
| "fillConvexPoly(...)\n", | |
| " fillConvexPoly(img, points, color[, lineType[, shift]]) -> img\n", | |
| " \n", | |
| " fillPoly(...)\n", | |
| " fillPoly(img, pts, color[, lineType[, shift[, offset]]]) -> img\n", | |
| " \n", | |
| " filter2D(...)\n", | |
| " filter2D(src, ddepth, kernel[, dst[, anchor[, delta[, borderType]]]]) -> dst\n", | |
| " \n", | |
| " filterSpeckles(...)\n", | |
| " filterSpeckles(img, newVal, maxSpeckleSize, maxDiff[, buf]) -> img, buf\n", | |
| " \n", | |
| " findChessboardCorners(...)\n", | |
| " findChessboardCorners(image, patternSize[, corners[, flags]]) -> retval, corners\n", | |
| " \n", | |
| " findCirclesGrid(...)\n", | |
| " findCirclesGrid(image, patternSize[, centers[, flags[, blobDetector]]]) -> retval, centers\n", | |
| " \n", | |
| " findContours(...)\n", | |
| " findContours(image, mode, method[, contours[, hierarchy[, offset]]]) -> image, contours, hierarchy" | |
| ], | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "#imAg = cv2.imread('lines.jpg')\n", | |
| "img = cv2.imread('images/face_300.jpg')\n", | |
| "\n", | |
| "kernel = np.load('numpy-filters/sharpen_filter.npy')\n", | |
| "\n", | |
| "new_img = cv2.filter2D(img,-1,kernel) # ddepth = -1, means destination image has depth same as input image.\n", | |
| "cv2.imshow('img',img)\n", | |
| "cv2.imshow('new',new_img)\n", | |
| "\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "img = cv2.imread('images/face_300.jpg')\n", | |
| "\n", | |
| "kernel = np.array([[1, 1, 1, 1, 1],\n", | |
| " [1, 1, 1, 1, 1],\n", | |
| " [1, 1, -23, 1, 1],\n", | |
| " [1, 1, 1, 1, 1],\n", | |
| " [1, 1, 1, 1, 1]],np.float32)\n", | |
| "\n", | |
| "# kernel should be floating point type.\n", | |
| "\n", | |
| "new_img = cv2.filter2D(img,-1,kernel) # ddepth = -1, means destination image has depth same as input image.\n", | |
| "#cv2.imshow('img',img)\n", | |
| "cv2.imshow('new',new_img)\n", | |
| "cv2.imwrite(\"lotsawhite.jpg\", new_img)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "img = cv2.imread('images/face_gray.jpg')\n", | |
| "\n", | |
| "kernel = np.array([ [-1,-.5,2,-.5,-1],\n", | |
| " [-1,-.5,3,-.5,-1],\n", | |
| " [-1,-.5,5,-.5,-1],\n", | |
| " [-1,-.5,3,-.5,-1],\n", | |
| " [-1,-.5,2,-.5,-1] ],np.float32) # kernel should be floating point type.\n", | |
| "\n", | |
| "new_img = cv2.filter2D(img,-1,kernel) # ddepth = -1, means destination image has depth same as input image.\n", | |
| "#cv2.imshow('img',img)\n", | |
| "cv2.imshow('new',new_img)\n", | |
| "\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "#img = cv2.imread('spies.jpg')\n", | |
| "#img = cv2.imread('spies.jpg', 0)\n", | |
| "img = cv2.imread('image1.png', 0)\n", | |
| "\n", | |
| "kernel = np.array([ [-1,.5,.5,.5,-1],\n", | |
| " [-1,.5,2,.5,-1],\n", | |
| " [-1,.5,2,.5,-1],\n", | |
| " [-1,.5,2,.5,-1],\n", | |
| " [-1,.5,.5,.5,-1] ],np.float32) # kernel should be floating point type.\n", | |
| "\n", | |
| "new_img = cv2.filter2D(img,-1,kernel) # ddepth = -1, means destination image has depth same as input image.\n", | |
| "#cv2.imshow('img',img)\n", | |
| "#ret,thresh1 = cv2.threshold(new_img,127,255,cv2.THRESH_BINARY)\n", | |
| "ret,thresh1 = cv2.threshold(new_img,150,255,cv2.THRESH_BINARY)\n", | |
| "cv2.imshow('new_img',new_img)\n", | |
| "cv2.imshow('thresh1',thresh1)\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "https://docs.gimp.org/en/plug-in-convmatrix.html" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "\n", | |
| "kernel = np.array([ [0,-1,0],\n", | |
| " [-1,5,-1],\n", | |
| " [0,-1,0] ],np.float32) # kernel should be floating point type.\n", | |
| "\n", | |
| "new_img = cv2.filter2D(img,-1,kernel) # ddepth = -1, means destination image has depth same as input image.\n", | |
| "cv2.imshow('img',img)\n", | |
| "cv2.imshow('new',new_img)\n", | |
| "\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "def sharpen(imgin):\n", | |
| " img = cv2.imread(imgin)\n", | |
| " blured = cv2.GaussianBlur(img, (0, 0), 2.5)\n", | |
| " return cv2.addWeighted(img, 1.4, blured, -0.4, 0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "newim = sharpen(\"image1.png\")\n", | |
| "cv2.imwrite(\"img1new.png\", newim)\n", | |
| "\n", | |
| "cv2.imshow('newim', newim)\n", | |
| "\n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "img_mask = cv2.imread(\"face.png\")\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "#result2 = blender.weighted_average(img1, img2, percent=0.8)\n", | |
| "#result2 = blender.poisson_blend(img1, img2, img_mask, offset=(10, 10))\n", | |
| "result2 = blender.apply_mask(img1, img2)\n", | |
| "plt.imshow(result2)\n", | |
| "\ncv2.imwrite(\"junk.png,\", result2)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "from matplotlib import pyplot as plt\n", | |
| " \n", | |
| "#roi is the object or region of object we need to find\n", | |
| "roi = cv2.imread('eye.png')\n", | |
| "hsv = cv2.cvtColor(roi,cv2.COLOR_BGR2HSV)\n", | |
| " \n", | |
| "#target is the image we search in\n", | |
| "target = cv2.imread('image1.png')\n", | |
| "hsvt = cv2.cvtColor(target,cv2.COLOR_BGR2HSV)\n", | |
| " \n", | |
| "M = cv2.calcHist([hsv],[0, 1], None, [180, 256], [0, 180, 0, 256] )\n", | |
| "I = cv2.calcHist([hsvt],[0, 1], None, [180, 256], [0, 180, 0, 256] )\n", | |
| " \n", | |
| "R = M/(I+1)\n", | |
| "print M.max(),I.max(),R.dtype\n", | |
| "#cv2.normalize(prob,prob,0,255,cv2.NORM_MINMAX,0)\n", | |
| " \n", | |
| "h,s,v = cv2.split(hsvt)\n", | |
| "B = R[h.ravel(),s.ravel()]\n", | |
| "B = np.minimum(B,1)\n", | |
| "B = B.reshape(hsvt.shape[:2])\n", | |
| " \n", | |
| "disc = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(10,10))\n", | |
| "cv2.filter2D(B,-1,disc,B)\n", | |
| "B = np.uint8(B)\n", | |
| "cv2.normalize(B,B,0,255,cv2.NORM_MINMAX)\n", | |
| "ret,thresh = cv2.threshold(B,50,255,0)\n", | |
| "res = cv2.bitwise_and(target,target,mask = thresh)\n", | |
| "cv2.imshow('nice',res)\n", | |
| "cv2.imshow('img',target)\n", | |
| "res = np.vstack((target,cv2.merge((B,B,B)),res))\n", | |
| "cv2.imwrite('thresh.png',thresh)\n", | |
| "cv2.imwrite('output.png',res)\n", | |
| "plt.figure(figsize=(20,10)) \n", | |
| "cv2.waitKey(0)\n", | |
| "cv2.destroyAllWindows()\n", | |
| "plt.imshow(res)\n", | |
| "##plt.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "cv2.getStructuringElement(cv2.THRESH_BINARY,(5,5))" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5))" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "img = cv2.imread('image1.png',0)\n", | |
| "#kernel = np.ones((5,5),np.uint8)\n", | |
| "kernel1 = np.ones((2,2),np.uint8)\n", | |
| "opening1b = cv2.morphologyEx(img, cv2.MORPH_ELLIPSE, kernel1)\n", | |
| "\n", | |
| "kernel2 = np.ones((4,4),np.uint8)\n", | |
| "opening2b = cv2.morphologyEx(img, cv2.MORPH_ELLIPSE, kernel2)\n", | |
| "\n", | |
| "kernel3 = np.ones((10,10),np.uint8)\n", | |
| "opening3b = cv2.morphologyEx(img, cv2.MORPH_ELLIPSE, kernel3)\n", | |
| "\n", | |
| "kernel4 = np.ones((20,20),np.uint8)\n", | |
| "opening4b = cv2.morphologyEx(img, cv2.MORPH_ELLIPSE, kernel4)\n", | |
| "\n\n\n\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(opening1b)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(opening2b)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(opening3b)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(opening4b)\n", | |
| "plt.show()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "img = cv2.imread('gradient.png',0)\n", | |
| "ret,thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)\n", | |
| "ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)\n", | |
| "ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)\n", | |
| "ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)\n", | |
| "ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)\n", | |
| "titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']\n", | |
| "images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]\n", | |
| "for i in xrange(6):\n", | |
| " plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')\n", | |
| " plt.title(titles[i])\n", | |
| " plt.xticks([]),plt.yticks([])\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "#img = cv2.imread('image1.png',0)\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "#kernel = np.ones((5,5),np.uint8)\n", | |
| "kernel1 = np.ones((2,2),np.uint8)\n", | |
| "opening1a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel1)\n", | |
| "\n", | |
| "kernel2 = np.ones((4,4),np.uint8)\n", | |
| "opening2a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel2)\n", | |
| "\n", | |
| "kernel3 = np.ones((10,10),np.uint8)\n", | |
| "opening3a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel3)\n", | |
| "\n", | |
| "kernel4 = np.ones((20,20),np.uint8)\n", | |
| "opening4a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel4)\n", | |
| "\n", | |
| "titles = ['2,2','4,4','10,10','20,20']\n", | |
| "images = [opening1a, opening2a, opening3a, opening4a]\n", | |
| "\n", | |
| "for i in xrange(4):\n", | |
| " plt.subplot(3,2,i+1),plt.imshow(images[i],'gray')\n", | |
| " plt.title(titles[i])\n", | |
| " plt.xticks([]),plt.yticks([])\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "#img = cv2.imread('image1.png',0)\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "#kernel = np.ones((5,5),np.uint8)\n", | |
| "kernel1 = np.ones((2,2),np.uint8)\n", | |
| "opening1a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel1)\n", | |
| "\n", | |
| "kernel2 = np.ones((4,4),np.uint8)\n", | |
| "opening2a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel2)\n", | |
| "\n", | |
| "kernel3 = np.ones((10,10),np.uint8)\n", | |
| "opening3a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel3)\n", | |
| "\n", | |
| "kernel4 = np.ones((20,20),np.uint8)\n", | |
| "opening4a = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel4)\n", | |
| "\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(opening1a)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(opening2a)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(opening3a)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(opening4a)\n", | |
| "plt.show()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "\n", | |
| "# Create a dummy input image.\n", | |
| "canvas = np.zeros((640, 750), dtype=np.uint8)\n", | |
| "canvas = cv2.circle(canvas, (640, 750), 20, (255,), -1)\n", | |
| "\n", | |
| "kernel = np.array([[-1, -1, -1],\n", | |
| " [-1, 4, -1],\n", | |
| " [-1, -1, -1]])\n", | |
| "\n", | |
| "dst = cv2.filter2D(canvas, -1, kernel)\n", | |
| "cv2.imwrite(\"filtered.png\", dst)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "import numpy as np\n", | |
| "%matplotlib inline\n", | |
| "# Create a dummy input image.\n", | |
| "canvas = np.zeros((100, 100), dtype=np.uint8)\n", | |
| "canvas = cv2.circle(canvas, (50, 50), 20, (255,), -1)\n", | |
| "\n\n", | |
| "#kernel = np.array([[0, 0, 0],\n", | |
| "# [0, 1, 0],\n", | |
| "# [0, 0, 0]])\n", | |
| "\n", | |
| "kernel = np.array([[-1.2, -1.2, -1.2],\n", | |
| " [-1.2, 11, -1.2],\n", | |
| " [-1.2, -1.2, -1.2]])\n", | |
| "\n\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "\n", | |
| "dst = cv2.filter2D(img, -1, kernel)\n", | |
| "cv2.imwrite(\"filtered.png\", dst)\n", | |
| "#plt.show(dst)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import functools\n", | |
| "%matplotlib inline\n", | |
| "fp = np.array([[0, 1, 0],\n", | |
| " [1, 1, 1],\n", | |
| " [0, 1, 0]], np.uint8)\n", | |
| "\n", | |
| "cv2.median_filter = functools.partial(scipy.ndimage.generic_filter,\n", | |
| " function=np.median,\n", | |
| " footprint=fp)\n", | |
| "\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "#dst = cv2.filter2D(img, -1, kernel)\n", | |
| "dst = cv2.filter2D(img, -1, kernel)\n", | |
| "cv2.imwrite(\"filtered.png\", dst)\n", | |
| "#plt.show(dst)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import functools\n", | |
| "%matplotlib inline\n", | |
| "fp = np.array([[0, 1, 0],\n", | |
| " [1, 1, 1],\n", | |
| " [0, 1, 0]], np.uint8)\n", | |
| "\n", | |
| "median_filter = functools.partial(scipy.ndimage.generic_filter,\n", | |
| " function=np.median,\n", | |
| " footprint=fp)\n", | |
| "print median_filter" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import numpy as np\n", | |
| "from scipy import ndimage\n", | |
| "import functools\n", | |
| "%matplotlib inline\n", | |
| "\n", | |
| "fp = np.array([[0, 1, 0],\n", | |
| " [1, 1, 1],\n", | |
| " [0, 1, 0]], np.uint8)\n", | |
| "\n", | |
| "median_filter = functools.partial(scipy.ndimage.generic_filter,\n", | |
| " function=np.median,\n", | |
| " footprint=fp)\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "#img = cv2.imread('image1.png',0)\n", | |
| "img = cv2.imread('image1.png')\n", | |
| "kernel = np.ones((5,5),np.uint8)\n", | |
| "opening = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel)\n", | |
| "plt.show(opening)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "img = cv2.imread('image1.png',0)\n", | |
| "#kernel = np.ones((5,5),np.uint8)\n", | |
| "kernel1 = np.ones((2,2),np.uint8)\n", | |
| "opening1 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel1)\n", | |
| "\n", | |
| "kernel2 = np.ones((4,4),np.uint8)\n", | |
| "opening2 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel2)\n", | |
| "\n", | |
| "kernel3 = np.ones((10,10),np.uint8)\n", | |
| "opening3 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel3)\n", | |
| "\n", | |
| "kernel4 = np.ones((20,20),np.uint8)\n", | |
| "opening4 = cv2.morphologyEx(img, cv2.MORPH_OPEN, kernel4)\n", | |
| "\n\n\n\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(opening1)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(opening2)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(opening3)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(opening4)\n", | |
| "plt.show()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "img = cv2.imread('image1.png',0)\n", | |
| "#kernel = np.ones((8,8),np.uint8)\n", | |
| "kernel = np.ones((4,4),np.uint8)\n", | |
| "dilate1 = cv2.dilate(img,kernel,iterations = 1)\n", | |
| "dilate2 = cv2.dilate(img,kernel,iterations = 5)\n", | |
| "dilate3 = cv2.dilate(img,kernel,iterations = 10)\n", | |
| "dilate4 = cv2.dilate(img,kernel,iterations = 20)\n", | |
| "\n\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(dilate1)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(dilate2)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(dilate3)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(dilate4)\n", | |
| "plt.show()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "import numpy as np\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "img = cv2.imread('image1.png',0)\n", | |
| "#kernel = np.ones((8,8),np.uint8)\n", | |
| "kernel = np.ones((4,4),np.uint8)\n", | |
| "erosion1 = cv2.erode(img,kernel,iterations = 1)\n", | |
| "erosion2 = cv2.erode(img,kernel,iterations = 5)\n", | |
| "erosion3 = cv2.erode(img,kernel,iterations = 10)\n", | |
| "erosion4 = cv2.erode(img,kernel,iterations = 20)\n", | |
| "\n\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(erosion1)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(erosion2)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(erosion3)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(erosion4)\n", | |
| "plt.show()\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "img_mask = cv2.imread(\"mask_in.png\")\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "result1 = blender.weighted_average(img1, img2, percent=0.8)\n", | |
| "plt.imshow(result1)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "img1 = cv2.imread(\"image2.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "img_mask = cv2.imread(\"image2.png\")\n", | |
| "#img_mask = cv2.imread(\"face.png\")\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "result1 = blender.alpha_feathering(img1, img2, img_mask, blur_radius=5)\n", | |
| "result2 = blender.alpha_feathering(img1, img2, img_mask, blur_radius=15)\n", | |
| "result3 = blender.alpha_feathering(img1, img2, img_mask, blur_radius=25)\n", | |
| "result4 = blender.alpha_feathering(img1, img2, img_mask, blur_radius=35)\n", | |
| "\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import matplotlib as plt\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(result1)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(result2)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(result3)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(result4)\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "error", | |
| "ename": "AttributeError", | |
| "evalue": "'module' object has no attribute 'subplot'", | |
| "traceback": [ | |
| "\u001b[0;31m\u001b[0m", | |
| "\u001b[0;31mAttributeError\u001b[0mTraceback (most recent call last)", | |
| "\u001b[0;32m<ipython-input-14-0b5bcdc89e9e>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 1\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m----> 2\u001b[0;31m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 3\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msubplot\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mresult2\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;31mAttributeError\u001b[0m: 'module' object has no attribute 'subplot'" | |
| ] | |
| } | |
| ], | |
| "execution_count": 14, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "plt.figure(figsize=(20,10))\n", | |
| "result1 = blender.weighted_average(img1, img2, percent=0.2)\n", | |
| "result2 = blender.weighted_average(img1, img2, percent=0.4)\n", | |
| "result3 = blender.weighted_average(img1, img2, percent=0.6)\n", | |
| "result4 = blender.weighted_average(img1, img2, percent=0.8)\n", | |
| "\n", | |
| "plt.subplot(2, 2, 1)\n", | |
| "plt.imshow(result1)\n", | |
| "plt.subplot(2, 2, 2)\n", | |
| "plt.imshow(result2)\n", | |
| "plt.subplot(2, 2, 3)\n", | |
| "plt.imshow(result3)\n", | |
| "plt.subplot(2, 2, 4)\n", | |
| "plt.imshow(result4)\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [ | |
| { | |
| "output_type": "error", | |
| "ename": "ImportError", | |
| "evalue": "No module named blender", | |
| "traceback": [ | |
| "\u001b[0;31m\u001b[0m", | |
| "\u001b[0;31mImportError\u001b[0mTraceback (most recent call last)", | |
| "\u001b[0;32m<ipython-input-15-bef951e23dae>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[0;32m----> 1\u001b[0;31m \u001b[0;32mimport\u001b[0m \u001b[0mblender\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 2\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mmatplotlib\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mpyplot\u001b[0m \u001b[0;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 3\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mcv2\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 4\u001b[0m \u001b[0;32mfrom\u001b[0m \u001b[0mPIL\u001b[0m \u001b[0;32mimport\u001b[0m \u001b[0mImage\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 5\u001b[0m \u001b[0mget_ipython\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmagic\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34mu'matplotlib inline'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", | |
| "\u001b[0;31mImportError\u001b[0m: No module named blender" | |
| ] | |
| } | |
| ], | |
| "execution_count": 15, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "import pylab\n", | |
| "import cv2\n", | |
| "from PIL import Image\n", | |
| "%matplotlib inline\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "pylab.figure(figsize=(20,10))\n", | |
| "result1 = blender.weighted_average(img1, img2, percent=0.2)\n", | |
| "result2 = blender.weighted_average(img1, img2, percent=0.4)\n", | |
| "result3 = blender.weighted_average(img1, img2, percent=0.6)\n", | |
| "result4 = blender.weighted_average(img1, img2, percent=0.8)\n", | |
| "\n", | |
| "pylab.subplot(2, 2, 1)\n", | |
| "pylab.imshow(result1)\n", | |
| "pylab.subplot(2, 2, 2)\n", | |
| "pylab.imshow(result2)\n", | |
| "pylab.subplot(2, 2, 3)\n", | |
| "pylab.imshow(result3)\n", | |
| "pylab.subplot(2, 2, 4)\n", | |
| "pylab.imshow(result4)\n", | |
| "pylab.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "import pylab\n", | |
| "import cv2\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "from PIL import Image\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "fig = pylab.figure(figsize=(20,10))\n", | |
| "ax_size = [0,0,.5,.5]\n", | |
| "fig.add_axes(ax_size)\n", | |
| "\n", | |
| "pylab.imshow(img2, vmin=1, vmax=2, origin='upper')\n", | |
| "pylab.axis('off')\n", | |
| "## As you noted.\n", | |
| "pylab.savefig('pylab-output.png',bbox_inches='tight', pad_inches=0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# Path to the blender module used in the cells above:\n", | |
| "# /home/jack/Desktop/face_morpher/facemorpher/blender.py" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import blender\n", | |
| "import pylab\n", | |
| "import cv2\n", | |
| "from matplotlib import pyplot as plt\n", | |
| "from PIL import Image\n", | |
| "img1 = cv2.imread(\"image1.png\")\n", | |
| "img2 = cv2.imread(\"image2.png\")\n", | |
| "fig = pylab.figure(figsize=(20,10))\n", | |
| "\n", | |
| "# The ax_size controls the image size - not just the display size but 'image saved' size also\n", | |
| "#ax_size = [0,0,1,1]\n", | |
| "ax_size = [0,0,.5,.5]\n", | |
| "fig.add_axes(ax_size)\n", | |
| "\n", | |
| "# The origin='upper' can be changed to origin='lower' to flip the images upside down\n", | |
| "pylab.imshow(img2, vmin=1, vmax=2, origin='upper')\n", | |
| "\n", | |
| "# With pylab.axis('on') the image is shown with axis marks showing the original image's size in pixels \n", | |
| "pylab.axis('off')\n", | |
| "# The inches='tight' removes the large border common to plot images\n", | |
| "# pad_inches=0 also controls the image padding\n", | |
| "pylab.savefig('pylab-output.png',bbox_inches='tight', pad_inches=0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# wget: uppercase -O saves the download under the given name (lowercase -o only redirects the log)\n", | |
| "!wget -O Golum.jpg http://media.boingboing.net/wp-content/uploads/2017/04/o-RECEP-TAYYIP-ERDOGAN-GOLLUM-SMEAGOL-LORD-OF-THE-facebook.jpg" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import matplotlib.pyplot as plt\n", | |
| "%matplotlib inline\n", | |
| "from skimage import data\n", | |
| "cat = data.chelsea()\n", | |
| "reddish = cat[:, :, 0] > 160\n", | |
| "cat[reddish] = [0, 255, 0]\n", | |
| "plt.imshow(cat)\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import pylab\n", | |
| "colors = [[ 1., 10., 20., 30., 40., 50., 60., 70., 80., 90. ],\n", | |
| "[ 100. , 110., 120., 130., 140., 150., 160., 170., 180., 190. ],\n", | |
| "[ 200., 210., 220, 230. , 240., 250, 71.75, 75.25, 76.75, 74. ],\n", | |
| "[ 76., 74.75, 72.5, 72.25 ,75.25, 76.5, 73.5, 73., 75.25, 75.75],\n", | |
| "[ 75., 72.5, 72.25, 74.5, 73.25, 73.25, 74.5, 73.25, 73.5, 76.5 ],\n", | |
| "[ 74.5, 72., 69.5, 73.25, 73.75, 72., 76.75, 77., 74.25, 76.5 ],\n", | |
| "[ 72.5, 73.75, 72.75, 75.75, 78., 76.75, 77.75, 78.75, 77.25, 74. ],\n", | |
| "[ 74.5, 74.25, 74.75, 78.75, 80.75, 79.25, 74.5, 75., 76.25, 73. ],\n", | |
| "[ 75.5, 71.5, 71.75, 78.75, 80.25, 77.5, 75., 73.25, 72.25, 72.75],\n", | |
| "[ 77.5, 74.5, 72., 77.75, 78.25, 74., 76.75, 75.75, 74.25, 256. ]]\n", | |
| "\n", | |
| "fig = pylab.figure(frameon=False)\n", | |
| "ax_size = [0,0,1,1]\n", | |
| "fig.add_axes(ax_size)\n", | |
| "pylab.imshow(colors,vmin=0, vmax=256, origin='upper')\n", | |
| "pylab.axis('off')\n", | |
| "## As you noted.\n", | |
| "pylab.savefig('output.png',bbox_inches='tight', pad_inches=0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "# As demonstrated in the cells above this array may be saved to a *.npy file\n", | |
| "colors = [[ 1., 10., 20., 30., 40., 50., 60., 70., 80., 90. ],\n", | |
| "[ 100. , 110., 120., 130., 140., 150., 160., 170., 180., 190. ],\n", | |
| "[ 200., 210., 220, 230. , 240., 250, 71.75, 75.25, 76.75, 74. ],\n", | |
| "[ 76., 74.75, 72.5, 72.25 ,75.25, 76.5, 73.5, 73., 75.25, 75.75],\n", | |
| "[ 75., 72.5, 72.25, 74.5, 73.25, 73.25, 74.5, 73.25, 73.5, 76.5 ],\n", | |
| "[ 74.5, 72., 69.5, 73.25, 73.75, 72., 76.75, 77., 74.25, 76.5 ],\n", | |
| "[ 72.5, 73.75, 72.75, 75.75, 78., 76.75, 77.75, 78.75, 77.25, 74. ],\n", | |
| "[ 74.5, 74.25, 74.75, 78.75, 80.75, 79.25, 74.5, 75., 76.25, 73. ],\n", | |
| "[ 75.5, 71.5, 71.75, 78.75, 80.25, 77.5, 75., 73.25, 72.25, 72.75],\n", | |
| "[ 77.5, 74.5, 72., 77.75, 78.25, 74., 76.75, 75.75, 74.25, 256. ]]\n", | |
| "\n", | |
| "np.save('colors.npy', colors) # .npy extension is added if not given\n", | |
| "#Load and verify array\n", | |
| "d = np.load('colors.npy')\n", | |
| "colors == d\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "import matplotlib.pyplot as plt\n", | |
| "%matplotlib inline\n", | |
| "#loading and using the npy file created above\n", | |
| "colors = np.load('colors.npy')\n", | |
| "fig = plt.figure(frameon=False)\n", | |
| "ax_size = [0,0,1,1]\n", | |
| "fig.add_axes(ax_size)\n", | |
| "plt.imshow(colors,vmin=0, vmax=256, origin='upper')\n", | |
| "plt.axis('off')\n", | |
| "## As you noted.\n", | |
| "plt.savefig('output.png',bbox_inches='tight', pad_inches=0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import numpy as np\n", | |
| "from skimage import data\n", | |
| "colors = [[ 1., 10., 20., 30., 40., 50., 60., 70., 80., 90. ],\n", | |
| "[ 100. , 110., 120., 130., 140., 150., 160., 170., 180., 190. ],\n", | |
| "[ 200., 210., 220, 230. , 240., 250, 71.75, 75.25, 76.75, 74. ],\n", | |
| "[ 76., 74.75, 72.5, 72.25 ,75.25, 76.5, 73.5, 73., 75.25, 75.75],\n", | |
| "[ 75., 72.5, 72.25, 74.5, 73.25, 73.25, 74.5, 73.25, 73.5, 76.5 ],\n", | |
| "[ 74.5, 72., 69.5, 73.25, 73.75, 72., 76.75, 77., 74.25, 76.5 ],\n", | |
| "[ 72.5, 73.75, 72.75, 75.75, 78., 76.75, 77.75, 78.75, 77.25, 74. ],\n", | |
| "[ 74.5, 74.25, 74.75, 78.75, 80.75, 79.25, 74.5, 75., 76.25, 73. ],\n", | |
| "[ 75.5, 71.5, 71.75, 78.75, 80.25, 77.5, 75., 73.25, 72.25, 72.75],\n", | |
| "[ 77.5, 74.5, 72., 77.75, 78.25, 74., 76.75, 75.75, 74.25, 256. ]]\n", | |
| "\n", | |
| "#kernel = np.array([ [0,0,0,0,0],\n", | |
| "# [0,0,-.5,0,0],\n", | |
| "# [0,-.5,3,-.5,0],\n", | |
| "# [0,0,-.5,0,0],\n", | |
| "# [0,0,0,0,0] ],np.float32)\n", | |
| "\n", | |
| "np.save('colors.npy', colors) # .npy extension is added if not given\n", | |
| "d = np.load('colors.npy')\n", | |
| "colors == d\n" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import matplotlib.pyplot as plt\n", | |
| "%matplotlib inline\n", | |
| "colors = [[ 1., 10., 20., 30., 40., 50., 60., 70., 80., 90. ],\n", | |
| "[ 100. , 110., 120., 130., 140., 150., 160., 170., 180., 190. ],\n", | |
| "[ 200., 210., 220, 230. , 240., 250, 71.75, 75.25, 76.75, 74. ],\n", | |
| "[ 76., 74.75, 72.5, 72.25 ,75.25, 76.5, 73.5, 73., 75.25, 75.75],\n", | |
| "[ 75., 72.5, 72.25, 74.5, 73.25, 73.25, 74.5, 73.25, 73.5, 76.5 ],\n", | |
| "[ 74.5, 72., 69.5, 73.25, 73.75, 72., 76.75, 77., 74.25, 76.5 ],\n", | |
| "[ 72.5, 73.75, 72.75, 75.75, 78., 76.75, 77.75, 78.75, 77.25, 74. ],\n", | |
| "[ 74.5, 74.25, 74.75, 78.75, 80.75, 79.25, 74.5, 75., 76.25, 73. ],\n", | |
| "[ 75.5, 71.5, 71.75, 78.75, 80.25, 77.5, 75., 73.25, 72.25, 72.75],\n", | |
| "[ 77.5, 74.5, 72., 77.75, 78.25, 74., 76.75, 75.75, 74.25, 256. ]]\n", | |
| "\n", | |
| "fig = plt.figure(frameon=False)\n", | |
| "ax_size = [0,0,1,1]\n", | |
| "fig.add_axes(ax_size)\n", | |
| "plt.imshow(colors,vmin=0, vmax=256, origin='upper')\n", | |
| "plt.axis('off')\n", | |
| "## As you noted.\n", | |
| "plt.savefig('output.png',bbox_inches='tight', pad_inches=0)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "import matplotlib.pyplot as plt\n", | |
| "%matplotlib inline\n", | |
| "fig = plt.figure(frameon=False)\n", | |
| "ax = fig.add_axes([0, 0, 1, 1])\n", | |
| "ax.axis('off')\n", | |
| "\n", | |
| "ax.plot(range(10))\n", | |
| "\n", | |
| "# PNG is binary data: the file must be opened in 'wb' mode, not 'w'\n", | |
| "with open('image1.png', 'wb') as outfile:\n", | |
| " fig.canvas.print_png(outfile)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# References\n", | |
| "# https://progzoo.net/wiki/Python:Filtering_an_Array\n", | |
| "# http://www.scipy-lectures.org/packages/scikit-image/ \n", | |
| "# http://effbot.org/imagingbook/image.htm \n", | |
| " \n", | |
| "# a = np.array([[1, 2, 0, 0],   # (truncated snippet, kept for reference)" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [ | |
| "# NOTE(review): this cell depends on `file_` (an image path) and on imread/img_as_float/\n", | |
| "# resize/imsave (skimage) plus plt/cm (matplotlib) being defined by earlier cells — \n", | |
| "# it will not run on a fresh kernel as-is; confirm the required imports before reuse.\n", | |
| "img = imread(file_, as_grey=True)\n", | |
| "im = img_as_float(img)\n", | |
| "#im = misc.imread(file_)\n", | |
| "#im=np.fromfile(file_, dtype=np.int64)\n", | |
| "\n", | |
| "#Filler to avoid stretching\n", | |
| "orig_rows, orig_cols = im.shape\n", | |
| "print orig_rows, orig_cols\n", | |
| "if orig_rows < orig_cols:\n", | |
| " for addition in range(0,orig_cols-orig_rows):\n", | |
| " #adding white rows\n", | |
| " lst = np.array(list(float(255) for x in range(0,orig_cols)))\n", | |
| " im= np.vstack((im,lst))\n", | |
| "if orig_rows > orig_cols:\n", | |
| " for addition in range(0,orig_rows-orig_cols):\n", | |
| " #adding white columns\n", | |
| " lst = np.array(list([float(255)] for x in range(0,orig_rows)))\n", | |
| " im= np.hstack((im,lst))\n", | |
| "image = resize(im, (48, 48))\n", | |
| "imsave('test.jpg',im)\n", | |
| "imsave('test1.jpg',image)\n", | |
| "plt.imshow(im, cmap=cm.gray)\n", | |
| "plt.show()" | |
| ], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| }, | |
| { | |
| "cell_type": "code", | |
| "source": [], | |
| "outputs": [], | |
| "execution_count": null, | |
| "metadata": { | |
| "collapsed": false, | |
| "outputHidden": false, | |
| "inputHidden": false | |
| } | |
| } | |
| ], | |
| "metadata": { | |
| "kernelspec": { | |
| "name": "python2", | |
| "language": "python", | |
| "display_name": "Python 2" | |
| }, | |
| "kernel_info": { | |
| "name": "python2" | |
| }, | |
| "language_info": { | |
| "mimetype": "text/x-python", | |
| "nbconvert_exporter": "python", | |
| "name": "python", | |
| "pygments_lexer": "ipython2", | |
| "version": "2.7.13", | |
| "file_extension": ".py", | |
| "codemirror_mode": { | |
| "version": 2, | |
| "name": "ipython" | |
| } | |
| }, | |
| "github_username": "BlogBlocks", | |
| "gist_id": "0cb52687db00d505d83fd3aff4156d38" | |
| }, | |
| "nbformat": 4, | |
| "nbformat_minor": 4 | |
| } |
Sign up for free
to join this conversation on GitHub.
Already have an account?
Sign in to comment