//
//  NSImage+pixelData.swift
//
//  Created by Figgleforth on 5/21/15.
//  Copyright (c) 2015 Bojan Percevic. All rights reserved.
//

import AppKit

extension NSImage {
    // Assumes the first representation is a 32-bit RGBA NSBitmapImageRep with no
    // row padding; each pixel is four consecutive bytes: R, G, B, A.
    func pixelData() -> [Pixel] {
        let bmp = self.representations[0] as! NSBitmapImageRep
        var data: UnsafeMutablePointer<UInt8> = bmp.bitmapData
        var r, g, b, a: UInt8
        var pixels: [Pixel] = []

        for row in 0..<bmp.pixelsHigh {
            for col in 0..<bmp.pixelsWide {
                r = data.memory
                data = data.advancedBy(1)
                g = data.memory
                data = data.advancedBy(1)
                b = data.memory
                data = data.advancedBy(1)
                a = data.memory
                data = data.advancedBy(1)
                pixels.append(Pixel(r: r, g: g, b: b, a: a, row: row, col: col))
            }
        }

        return pixels
    }
}

struct Pixel {
    var r: Float
    var g: Float
    var b: Float
    var a: Float
    var row: Int
    var col: Int

    init(r: UInt8, g: UInt8, b: UInt8, a: UInt8, row: Int, col: Int) {
        self.r = Float(r)
        self.g = Float(g)
        self.b = Float(b)
        self.a = Float(a)
        self.row = row
        self.col = col
    }

    var color: NSColor {
        return NSColor(red: CGFloat(r/255.0), green: CGFloat(g/255.0), blue: CGFloat(b/255.0), alpha: CGFloat(a/255.0))
    }

    var description: String {
        return "RGBA(\(r), \(g), \(b), \(a))"
    }
}
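
For illustration, a minimal usage sketch in the same Swift 1.x vintage as the gist; the image name here is just an example and is not part of the gist:

if let image = NSImage(named: "example.png") {
    let pixels = image.pixelData()
    if let first = pixels.first {
        // Prints something like "RGBA(255.0, 0.0, 0.0, 255.0)"
        println(first.description)
    }
}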
@AbePralle: Nice way to do it. Basically you just let Core Graphics do the work of converting the image data to a known format for you. 'CGContextDrawImage' is fast, not to mention thoroughly debugged by decades of extensive use. As for speed, if profiling were to indicate it's a problem for a given app, the main issue would likely be the dynamic memory allocation for the buffer; however, since the gist's code repeatedly appends to an initially empty array, which has to re-allocate several times before it is finally big enough for the image data, I think your code would outperform it. The gist's code could just call reserveCapacity before the loop to remedy that, as sketched below, and if allocation still turned out to be an issue, both approaches could re-use a buffer or array to avoid the overhead entirely.
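
Concretely, the reservation is a one-line change before the nested loops; this is a sketch against the gist's code, not a change its author has made:

var pixels: [Pixel] = []
// Reserve one slot per pixel up front so append() never re-allocates mid-loop.
pixels.reserveCapacity(bmp.pixelsHigh * bmp.pixelsWide)

for row in 0..<bmp.pixelsHigh {
    // ... unchanged pixel loop from the gist ...
}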
My approach is to create a CGContextRef (drawing context) backed by my own data buffer and then draw the CGImage into that. That takes care of any of the different formats @chipjarred mentions and may be faster as well, IDK. My Objective-C++ code for that should be fairly easy to convert into Swift.
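
The Objective-C++ listing itself is not reproduced here; the following is a rough sketch of the same idea in current Swift, drawing the CGImage into a context backed by a caller-owned buffer. The function name and the 8-bit premultiplied RGBA format are illustrative assumptions, not taken from the original code:

import AppKit

// Sketch: draw a CGImage into an RGBA8 bitmap context that we own,
// so the pixel layout of the buffer is known regardless of the source format.
func rgbaPixelData(of image: NSImage) -> [UInt8]? {
    guard let cgImage = image.cgImage(forProposedRect: nil, context: nil, hints: nil) else {
        return nil
    }

    let width = cgImage.width
    let height = cgImage.height
    let bytesPerRow = width * 4
    var buffer = [UInt8](repeating: 0, count: bytesPerRow * height)

    let drew = buffer.withUnsafeMutableBytes { raw -> Bool in
        guard let context = CGContext(data: raw.baseAddress,
                                      width: width,
                                      height: height,
                                      bitsPerComponent: 8,
                                      bytesPerRow: bytesPerRow,
                                      space: CGColorSpaceCreateDeviceRGB(),
                                      bitmapInfo: CGImageAlphaInfo.premultipliedLast.rawValue) else {
            return false
        }
        // Core Graphics converts whatever format the source image uses into our RGBA buffer.
        context.draw(cgImage, in: CGRect(x: 0, y: 0, width: width, height: height))
        return true
    }

    return drew ? buffer : nil
}

Because the caller chooses the buffer, its size, and its pixel format, the read-back code never has to care what layout the source image used, which is exactly the point of the approach described above.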