我正在从一组 uiimage 创建视频。
我使用了多篇 stackoverflow 帖子并混合和修改了很多提供的代码。
我将包含我当前正在使用的文件。
图像目前是这样出来的,我想这是因为它们被转换为 CGImage,所以它们不保持方向?
此外,当用户拍摄水平照片时,我如何处理缩放。
我基本上想显示拍摄的图像。
前两张图片是垂直拍摄的,看起来向左翻转了 90 度,最后一张是水平拍摄的。
类:
import AVFoundation
import UIKit
// NSError domain used for every error the builder reports.
let kErrorDomain = "TimeLapseBuilder"
// Error code: AVAssetWriter.startWriting() returned false.
let kFailedToStartAssetWriterError = 0
// Error code: appending a pixel buffer to the adaptor failed.
let kFailedToAppendPixelBufferError = 1
/// Assembles an array of UIImages into a QuickTime movie (one image per
/// second) using AVAssetWriter. Swift 2 era APIs, kept for source
/// compatibility with the rest of this file.
public class TimeLapseBuilder: NSObject {
    // Images to encode; consumed (removed from the front) as frames are written.
    var photos: [UIImage]
    var videoWriter: AVAssetWriter?
    // Target render size; overwritten by the value passed to build(_:...).
    var outputSize = CGSizeMake(1920, 1080)

    public init(photos: [UIImage]) {
        self.photos = photos
        super.init()
    }

    /// Writes `photos` to Documents/AssembledVideo.mov at `outputSize`.
    /// - Parameters:
    ///   - progress: invoked on the writer's queue after each appended frame.
    ///   - success: invoked with the output URL when writing finishes cleanly.
    ///   - failure: invoked if the writer fails to start or a frame fails to append.
    /// Exactly one of `success`/`failure` is called per invocation.
    public func build(outputSize outputSize: CGSize, progress: (NSProgress -> Void), success: (NSURL -> Void), failure: (NSError -> Void)) {
        self.outputSize = outputSize
        let startTime = NSDate.timeIntervalSinceReferenceDate()
        let fileManager = NSFileManager.defaultManager()
        let urls = fileManager.URLsForDirectory(.DocumentDirectory, inDomains: .UserDomainMask)
        guard let documentDirectory: NSURL = urls.first else {
            fatalError("documentDir Error")
        }
        let videoOutputURL = documentDirectory.URLByAppendingPathComponent("AssembledVideo.mov")
        // AVAssetWriter refuses to overwrite, so remove any file from a previous run.
        if fileManager.fileExistsAtPath(videoOutputURL.path!) {
            do {
                try fileManager.removeItemAtPath(videoOutputURL.path!)
            } catch {
                fatalError("Unable to delete file: \(error) : \(__FUNCTION__).")
            }
        }
        guard let videoWriter = try? AVAssetWriter(URL: videoOutputURL, fileType: AVFileTypeQuickTimeMovie) else {
            fatalError("AVAssetWriter error")
        }
        let outputSettings = [
            AVVideoCodecKey: AVVideoCodecH264,
            AVVideoWidthKey: NSNumber(float: Float(outputSize.width)),
            AVVideoHeightKey: NSNumber(float: Float(outputSize.height)),
        ]
        guard videoWriter.canApplyOutputSettings(outputSettings, forMediaType: AVMediaTypeVideo) else {
            fatalError("Negative : Can't apply the Output settings...")
        }
        let videoWriterInput = AVAssetWriterInput(mediaType: AVMediaTypeVideo, outputSettings: outputSettings)
        let sourcePixelBufferAttributesDictionary = [
            kCVPixelBufferPixelFormatTypeKey as String: NSNumber(unsignedInt: kCVPixelFormatType_32ARGB),
            kCVPixelBufferWidthKey as String: NSNumber(float: Float(outputSize.width)),
            kCVPixelBufferHeightKey as String: NSNumber(float: Float(outputSize.height)),
        ]
        let pixelBufferAdaptor = AVAssetWriterInputPixelBufferAdaptor(
            assetWriterInput: videoWriterInput,
            sourcePixelBufferAttributes: sourcePixelBufferAttributesDictionary
        )
        assert(videoWriter.canAddInput(videoWriterInput))
        videoWriter.addInput(videoWriterInput)

        guard videoWriter.startWriting() else {
            // Synchronous failure: the writer never started, report immediately.
            failure(NSError(domain: kErrorDomain, code: kFailedToStartAssetWriterError,
                userInfo: ["description": "AVAssetWriter failed to start writing"]))
            return
        }

        videoWriter.startSessionAtSourceTime(kCMTimeZero)
        assert(pixelBufferAdaptor.pixelBufferPool != nil)
        let media_queue = dispatch_queue_create("mediaInputQueue", nil)
        videoWriterInput.requestMediaDataWhenReadyOnQueue(media_queue, usingBlock: { () -> Void in
            let fps: Int32 = 1
            let currentProgress = NSProgress(totalUnitCount: Int64(self.photos.count))
            var frameCount: Int64 = 0
            // BUG FIX: the original stored the error in a variable owned by the
            // enclosing scope and checked it synchronously, before this async
            // block ever ran — so append failures were never reported. The error
            // now lives here and is delivered from the completion handler below.
            var appendError: NSError?
            while !self.photos.isEmpty {
                if videoWriterInput.readyForMoreMediaData {
                    let nextPhoto = self.photos.removeAtIndex(0)
                    // BUG FIX: frame N is simply presented at N / fps. The
                    // original added an extra frameDuration for every frame
                    // after the first (0, 2, 3, 4...), stretching frame 0.
                    let presentationTime = CMTimeMake(frameCount, fps)
                    if !self.appendPixelBufferForImage(nextPhoto, pixelBufferAdaptor: pixelBufferAdaptor, presentationTime: presentationTime) {
                        appendError = NSError(domain: kErrorDomain, code: kFailedToAppendPixelBufferError,
                            userInfo: [
                                "description": "AVAssetWriterInputPixelBufferAdapter failed to append pixel buffer",
                                "rawError": videoWriter.error ?? "(none)"
                            ])
                        break
                    }
                    frameCount += 1
                    currentProgress.completedUnitCount = frameCount
                    progress(currentProgress)
                }
            }
            let elapsedTime: NSTimeInterval = NSDate.timeIntervalSinceReferenceDate() - startTime
            print("rendering time \(self.stringFromTimeInterval(elapsedTime))")
            videoWriterInput.markAsFinished()
            videoWriter.finishWritingWithCompletionHandler { () -> Void in
                // Report the outcome exactly once, after the file is finalized.
                if let appendError = appendError {
                    failure(appendError)
                } else {
                    success(videoOutputURL)
                }
            }
        })
    }

    /// Renders `image` into a freshly created ARGB pixel buffer (sized to the
    /// image, not `outputSize`) and appends it at `presentationTime`.
    /// Returns true only if both buffer creation and the append succeeded.
    public func appendPixelBufferForImage(image: UIImage, pixelBufferAdaptor: AVAssetWriterInputPixelBufferAdaptor, presentationTime: CMTime) -> Bool {
        // BUG FIX: start pessimistic. The original defaulted to true, so a
        // failed CVPixelBufferCreate was reported to the caller as success.
        var appendSucceeded = false
        autoreleasepool {
            var pixelBuffer: CVPixelBuffer? = nil
            let options: [NSObject: AnyObject] = [
                kCVPixelBufferCGImageCompatibilityKey: Int(true),
                kCVPixelBufferCGBitmapContextCompatibilityKey: Int(true)
            ]
            let status: CVReturn = CVPixelBufferCreate(kCFAllocatorDefault, Int(image.size.width), Int(image.size.height), kCVPixelFormatType_32ARGB, options as CFDictionaryRef, &pixelBuffer)
            if let pixelBuffer = pixelBuffer where status == 0 {
                self.pixelBufferFromImage(image.CGImage!, pxbuffer: pixelBuffer, andSize: CGSize(width: image.size.width, height: image.size.height))
                appendSucceeded = pixelBufferAdaptor.appendPixelBuffer(pixelBuffer, withPresentationTime: presentationTime)
            } else {
                NSLog("error: Failed to allocate pixel buffer from pool")
            }
        }
        return appendSucceeded
    }

    /// Draws `image` into `pxbuffer` via a CGBitmapContext covering `size`.
    /// NOTE(review): drawing the raw CGImage discards UIImage orientation —
    /// see the `fixedOrientation` extension in this file's answer.
    func pixelBufferFromImage(image: CGImageRef, pxbuffer: CVPixelBuffer, andSize size: CGSize) {
        CVPixelBufferLockBaseAddress(pxbuffer, 0)
        let pxdata = CVPixelBufferGetBaseAddress(pxbuffer)
        let rgbColorSpace = CGColorSpaceCreateDeviceRGB()
        let context = CGBitmapContextCreate(pxdata, Int(size.width), Int(size.height), 8, CVPixelBufferGetBytesPerRow(pxbuffer), rgbColorSpace, CGImageAlphaInfo.PremultipliedFirst.rawValue)
        // (Removed a no-op CGContextConcatCTM with a 0-radian rotation.)
        CGContextDrawImage(context, CGRectMake(0, 0, size.width, size.height), image)
        CVPixelBufferUnlockBaseAddress(pxbuffer, 0)
    }

    /// Formats a duration as "HH:MM:SS.mmm"-style text, omitting leading
    /// zero components (hours, then minutes).
    func stringFromTimeInterval(interval: NSTimeInterval) -> String {
        let ti = NSInteger(interval)
        let ms = Int((interval % 1) * 1000)
        let seconds = ti % 60
        let minutes = (ti / 60) % 60
        let hours = (ti / 3600)
        if hours > 0 {
            return NSString(format: "%0.2d:%0.2d:%0.2d.%0.2d", hours, minutes, seconds, ms) as String
        } else if minutes > 0 {
            return NSString(format: "%0.2d:%0.2d.%0.2d", minutes, seconds, ms) as String
        } else {
            return NSString(format: "%0.2d.%0.2d", seconds, ms) as String
        }
    }
}
最佳答案
这是 UIImage 的常见问题,因为方向通常来自相机上的 EXIF 数据。
我在 UIImage 上写了一个扩展,允许我以正确的方向访问图像的实例。
extension UIImage {
    /// Returns a copy of the image re-rendered so its pixel data is upright
    /// (`.Up`). Camera photos carry their rotation as EXIF orientation, which
    /// CGImage-based pipelines (e.g. AVAssetWriter) ignore — bake it in first.
    var fixedOrientation: UIImage {
        if self.imageOrientation == .Up {
            return self  // pixels already upright; nothing to do
        }
        var transform: CGAffineTransform = CGAffineTransformIdentity
        // Step 1: rotate into the upright frame. Each rotation is paired with a
        // translation that moves the origin back into the visible rect.
        switch (self.imageOrientation) {
        case .Down, .DownMirrored:
            // BUG FIX: the original translated by (width, width); a 180°
            // rotation needs a vertical offset equal to the HEIGHT.
            transform = CGAffineTransformTranslate(transform, self.size.width, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI))
        case .Left, .LeftMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformRotate(transform, CGFloat(M_PI_2))
        case .Right, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, 0, self.size.height)
            transform = CGAffineTransformRotate(transform, CGFloat(-M_PI_2))
        case .Up, .UpMirrored:
            break
        }
        // Step 2: undo mirroring with a horizontal flip (the translate width
        // depends on whether step 1 swapped the axes).
        switch (self.imageOrientation) {
        case .UpMirrored, .DownMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.width, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .LeftMirrored, .RightMirrored:
            transform = CGAffineTransformTranslate(transform, self.size.height, 0)
            transform = CGAffineTransformScale(transform, -1, 1)
        case .Up, .Down, .Left, .Right:
            break
        }
        let context = CGBitmapContextCreate(nil, Int(self.size.width), Int(self.size.height), CGImageGetBitsPerComponent(self.CGImage), 0, CGImageGetColorSpace(self.CGImage), CGImageGetBitmapInfo(self.CGImage).rawValue)
        CGContextConcatCTM(context, transform)
        // For 90° rotations the draw rect is expressed in the pre-rotation
        // coordinate space, so width and height are swapped.
        switch (self.imageOrientation) {
        case .Left, .LeftMirrored, .Right, .RightMirrored:
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.height, self.size.width), self.CGImage)
        default:
            CGContextDrawImage(context, CGRectMake(0, 0, self.size.width, self.size.height), self.CGImage)
        }
        let cgImage = CGBitmapContextCreateImage(context)
        let uiImage = UIImage.init(CGImage: cgImage!)
        return uiImage
    }
}
类似地,如果您知道图像所需的 CGSize(您通常可以直接使用当前 View 的边界),那么您也可以将其用作 UIImage 的扩展。
extension UIImage {
    /// Returns a copy of the image scaled (stretched, ignoring aspect ratio)
    /// to `newSize`, rendered at 1x scale with an opaque=false context.
    func resize(newSize: CGSize) -> UIImage {
        UIGraphicsBeginImageContextWithOptions(newSize, false, 1.0)
        // BUG FIX: the original never called UIGraphicsEndImageContext, so
        // every call leaked an image context off the context stack.
        defer { UIGraphicsEndImageContext() }
        self.drawInRect(CGRectMake(0, 0, newSize.width, newSize.height))
        return UIGraphicsGetImageFromCurrentImageContext()
    }
}
我也很好奇这是否可以重构,但这对我来说总是有用的 - 希望这有帮助!
关于ios - uiimage 数组到视频图像方向和比例,我们在Stack Overflow上找到一个类似的问题: https://stackoverflow.com/questions/35827767/
我有多个ActiveRecord子类Item的实例数组,我需要根据最早的事件循环打印。在这种情况下,我需要打印付款和维护日期,如下所示:ItemAmaintenancerequiredin5daysItemBpaymentrequiredin6daysItemApaymentrequiredin7daysItemBmaintenancerequiredin8days我目前有两个查询,用于查找maintenance和payment项目(非排他性查询),并输出如下内容:paymentrequiredin...maintenancerequiredin...有什么方法可以改善上述(丑陋的)代
我的代码目前看起来像这样numbers=[1,2,3,4,5]defpop_threepop=[]3.times{pop有没有办法在一行中完成pop_three方法中的内容?我基本上想做类似numbers.slice(0,3)的事情,但要删除切片中的数组项。嗯...嗯,我想我刚刚意识到我可以试试slice! 最佳答案 是numbers.pop(3)或者numbers.shift(3)如果你想要另一边。 关于ruby-多次弹出/移动ruby数组,我们在StackOverflow上找到一
我需要读入一个包含数字列表的文件。此代码读取文件并将其放入二维数组中。现在我需要获取数组中所有数字的平均值,但我需要将数组的内容更改为int。有什么想法可以将to_i方法放在哪里吗?ClassTerraindefinitializefile_name@input=IO.readlines(file_name)#readinfile@size=@input[0].to_i@land=[@size]x=1whilex 最佳答案 只需将数组映射为整数:@land边注如果你想得到一条线的平均值,你可以这样做:values=@input[x]
我正在使用puppet为ruby程序提供一组常量。我需要提供一组主机名,我的程序将对其进行迭代。在我之前使用的bash脚本中,我只是将它作为一个puppet变量hosts=>"host1,host2"我将其提供给bash脚本作为HOSTS=显然这对ruby不太适用——我需要它的格式hosts=["host1","host2"]自从phosts和putsmy_array.inspect提供输出["host1","host2"]我希望使用其中之一。不幸的是,我终其一生都无法弄清楚如何让它发挥作用。我尝试了以下各项:我发现某处他们指出我需要在函数调用前放置“function_”……这
这个问题在这里已经有了答案:Checktoseeifanarrayisalreadysorted?(8个答案)关闭9年前。我只是想知道是否有办法检查数组是否在增加?这是我的解决方案,但我正在寻找更漂亮的方法:n=-1@arr.flatten.each{|e|returnfalseife
我有一个这样的哈希数组:[{:foo=>2,:date=>Sat,01Sep2014},{:foo2=>2,:date=>Sat,02Sep2014},{:foo3=>3,:date=>Sat,01Sep2014},{:foo4=>4,:date=>Sat,03Sep2014},{:foo5=>5,:date=>Sat,02Sep2014}]如果:date相同,我想合并哈希值。我对上面数组的期望是:[{:foo=>2,:foo3=>3,:date=>Sat,01Sep2014},{:foo2=>2,:foo5=>5:date=>Sat,02Sep2014},{:foo4=>4,:dat
我正在尝试在Ruby中制作一个cli应用程序,它接受一个给定的数组,然后将其显示为一个列表,我可以使用箭头键浏览它。我觉得我已经在Ruby中看到一个库已经这样做了,但我记不起它的名字了。我正在尝试对soundcloud2000中的代码进行逆向工程做类似的事情,但他的代码与SoundcloudAPI的使用紧密耦合。我知道cursesgem,我正在考虑更抽象的东西。广告有没有人见过可以做到这一点的库或一些概念证明的Ruby代码可以做到这一点? 最佳答案 我不知道这是否是您正在寻找的,但也许您可以使用我的想法。由于我没有关于您要完成的工作
这里有一个很好的答案解释了如何在Ruby中下载文件而不将其加载到内存中:https://stackoverflow.com/a/29743394/4852737require'open-uri'download=open('http://example.com/image.png')IO.copy_stream(download,'~/image.png')我如何验证下载文件的IO.copy_stream调用是否真的成功——这意味着下载的文件与我打算下载的文件完全相同,而不是下载一半的损坏文件?documentation说IO.copy_stream返回它复制的字节数,但是当我还没有下
我使用Ember作为我的前端和GrapeAPI来为我的API提供服务。前端发送类似:{"service"=>{"name"=>"Name","duration"=>"30","user"=>nil,"organization"=>"org","category"=>nil,"description"=>"description","disabled"=>true,"color"=>nil,"availabilities"=>[{"day"=>"Saturday","enabled"=>false,"timeSlots"=>[{"startAt"=>"09:00AM","endAt"=>
我正在尝试解析一个文本文件,该文件每行包含可变数量的单词和数字,如下所示:foo4.500bar3.001.33foobar如何读取由空格而不是换行符分隔的文件?有什么方法可以设置File("file.txt").foreach方法以使用空格而不是换行符作为分隔符? 最佳答案 接受的答案将slurp文件,这可能是大文本文件的问题。更好的解决方案是IO.foreach.它是惯用的,将按字符流式传输文件:File.foreach(filename,""){|string|putsstring}包含“thisisanexample”结果的