I went through a lot of trouble and a pile of documentation before I got FFmpeg to composite videos the way I wanted, so I am writing this up in the hope that it saves others the same head-scratching.
The idea: playblast each of the four viewports separately, then combine the four clips into a single video, with sound.
The script contains a hardcoded FFmpeg path; just download a recent build yourself and point the path at your own copy.
The full code:
import os
import subprocess
import maya.cmds as cmds


def split_list_by_chuck(userList, chuck):
    for i in xrange(0, len(userList), chuck):
        yield userList[i: i + chuck]
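
# A quick illustration of the chunking above (file names are made up):
#   list(split_list_by_chuck(['a.mov', 'b.mov', 'c.mov', 'd.mov'], 2))
#   -> [['a.mov', 'b.mov'], ['c.mov', 'd.mov']]
# i.e. four viewport videos become two rows of two.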
def get_timeSlinder_current_sound():
    import maya.mel
    # grab the global playback slider and query the audio node it displays
    gPlayBackSlider = maya.mel.eval('$tmpVar=$gPlayBackSlider')
    sound = cmds.timeControl(gPlayBackSlider, q=1, s=1, ds=1)
    if sound:
        return sound
    return None
def compostion_multiple_videos(videos, output_video, start_number, duration, rate, sound=None, column=2):
    ffmpeg_path = '//storage1.of3d.com/centralizedTools/third_party/exe/ffmpeg/win/3.3.1/bin/ffmpeg.exe'
    temp_videos = []
    videos_num = len(videos)
    if column > videos_num:
        print '>>> The number of videos must be at least the number of columns'
        return False
    # first pass: composite each chunk of `column` videos into one horizontal row
    for index, video in enumerate(split_list_by_chuck(videos, column)):
        input_videos = []
        num = len(video)
        for i in xrange(num):
            input_videos.append('-i %s' % video[i])
        composition_video_temp = os.path.join(os.path.dirname(output_video), 'temp_%s.mov' % index).replace('\\', '/')
        if os.path.exists(composition_video_temp):
            os.remove(composition_video_temp)
        all_input = ' '.join(input_videos)
        # pad the first video to `column` times its width, then overlay the second video into the right half
        command_temp = '{ffmpeg} {input_videos} -filter_complex "[0:v:0]pad=iw*{column}:ih[bg]; [bg][1:v:0]overlay=w" {output_video_temp}'.format(
            ffmpeg=ffmpeg_path, input_videos=all_input, column=column, output_video_temp=composition_video_temp)
        print '>>> Command: %s' % command_temp
        state = subprocess.call(command_temp, shell=True)
        temp_videos.append(composition_video_temp)
    if temp_videos:
        input_videos = []
        num = len(temp_videos)
        for i in xrange(num):
            input_videos.append('-i %s' % temp_videos[i])
        final_video_temp = os.path.join(os.path.dirname(output_video), '%s_temp.mov' %
                                        os.path.basename(output_video).split('.')[0]).replace('\\', '/')
        all_input = ' '.join(input_videos)
        command = '{ffmpeg} {input_videos_temp} -preset ultrafast -filter_complex vstack {output_video}'.format(
            ffmpeg=ffmpeg_path, input_videos_temp=all_input, output_video=final_video_temp)
        print '>>> Command: %s' % command
        if os.path.exists(final_video_temp):
            os.remove(final_video_temp)
        state = subprocess.call(command, shell=True)
        if sound:
            # mux the audio into the stacked video, trimmed to the playblast duration
            command = '{ffmpeg} -t {duration} -i {final_video_temp} -i {audio} -shortest {output_video}'.format(
                ffmpeg=ffmpeg_path, duration=str(duration), final_video_temp=final_video_temp, audio=sound, output_video=output_video)
            print '>>> Command: %s' % command
            if os.path.exists(output_video):
                os.remove(output_video)
            state = subprocess.call(command, shell=True)
        else:
            # no audio to mux: the stacked video is already the final result
            if os.path.exists(output_video):
                os.remove(output_video)
            os.rename(final_video_temp, output_video)
        temp_videos.append(final_video_temp)
        # clean up the per-view videos and the intermediate composites
        all_temp_video = set(temp_videos).union(set(videos))
        for video in all_temp_video:
            if os.path.exists(video):
                os.remove(video)
    if state == 0:
        return True
    else:
        return False
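
# Hypothetical stand-alone call, outside of playblast_from_multiple_views
# (all paths made up for illustration):
#   compostion_multiple_videos(
#       ['F:/Multiple_View/persp.mov', 'F:/Multiple_View/top.mov',
#        'F:/Multiple_View/front.mov', 'F:/Multiple_View/side.mov'],
#       'F:/Multiple_View/all.mov', 1001, '14.583', 24,
#       sound='F:/Multiple_View/sound.wav', column=2)
# would build a 2x2 grid and mux in sound.wav.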
def get_rate():
    rate_data = {'game': 15, 'film': 24, 'pal': 25, 'ntsc': 30, 'show': 48, 'palf': 50, 'ntscf': 60, '100fps': 100}
    current_rate = cmds.currentUnit(q=1, t=1)
    rate = rate_data.get(current_rate, 24)
    return rate
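
# Example: a scene whose time unit is 'ntsc' yields 30 fps here; a unit not in
# the table (e.g. '23.976fps') falls back to the 'film' rate of 24 fps.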
def playblast_from_multiple_views(videoPath, widthHeight, frameRange, videoFormat='qt', compression='H.264', sound=None):
    viewPanels = cmds.getPanel(typ='modelPanel')
    visiblePanels = cmds.getPanel(vis=1)
    renderPanels = list(set(viewPanels) & set(visiblePanels))
    video_paths = []
    camera_names = []
    sound_file = None
    frame_count = frameRange[1] - frameRange[0] + 1
    scene_rate = get_rate()
    duration = '%.03f' % (frame_count * 1.0 / scene_rate)
    if sound:
        sound_file = sound
    else:
        audio = get_timeSlinder_current_sound()
        if audio:
            audio_file = cmds.getAttr('%s.filename' % audio)
            # expand a leading environment variable such as $PROJECT/sound/foo.wav
            if audio_file.startswith('$'):
                env_variable = os.environ.get(audio_file.split('/', 1)[0].strip('$'))
                sound_file = os.path.join(env_variable, audio_file.split('/', 1)[1]).replace('\\', '/')
            else:
                sound_file = audio_file
    print '>>>', sound_file
    # playblast every visible model panel, once per camera
    for panel in renderPanels:
        cmds.setFocus(panel)
        camera_name = cmds.modelEditor(panel, q=1, cam=1)
        if camera_name in camera_names:
            continue
        single_view_video_path = os.path.join(os.path.dirname(videoPath), '%s.mov' % camera_name)  # hardcode
        print '>>> camera: %s; videoPath: %s' % (camera_name, single_view_video_path)
        cmds.modelEditor(panel, e=1, hud=1, da='smoothShaded', dtx=1, dl='default', alo=0, pm=1, ns=1)
        cmds.playblast(startTime=frameRange[0], endTime=frameRange[1], fmt=videoFormat, filename=single_view_video_path, forceOverwrite=1,
                       sequenceTime=0, clearCache=1, viewer=0, showOrnaments=1, offScreen=1,
                       fp=4, percent=100, compression=compression, quality=100, widthHeight=widthHeight)
        print '>>> playblast camera: %s' % camera_name
        camera_names.append(camera_name)
        video_paths.append(single_view_video_path)
    state = compostion_multiple_videos(video_paths, videoPath, frameRange[0], duration, scene_rate, sound_file)
    if state:
        print '>>> Finish playblasting multiple views'
    else:
        print '>>> Something wrong, please check carefully'
if __name__ == '__main__':
    video_path = 'F:/Multiple_View/all.mov'
    playblast_from_multiple_views(video_path, (1280, 720), [1001, 1350], sound=None)
A few quick notes on the FFmpeg commands:
command_temp = '{ffmpeg} {input_videos} -filter_complex "[0:v:0]pad=iw*{column}:ih[bg]; [bg][1:v:0]overlay=w" {output_video_temp}'.format(
    ffmpeg=ffmpeg_path, input_videos=all_input, column=column, output_video_temp=composition_video_temp)
This command joins several videos horizontally. column is the number of columns: with column=2, two videos sit side by side, and since the total number of videos is known, the number of rows follows. (As written, the filter only overlays one extra stream, [1:v:0], so each row effectively holds two videos; more columns per row would need further overlay steps.)
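With column=2 the printed command comes out roughly like this (the camera file names here are placeholders, and the ffmpeg path is abbreviated):
ffmpeg.exe -i F:/Multiple_View/persp.mov -i F:/Multiple_View/top.mov -filter_complex "[0:v:0]pad=iw*2:ih[bg]; [bg][1:v:0]overlay=w" F:/Multiple_View/temp_0.mov
pad=iw*2:ih doubles the canvas width of the first video, and overlay=w places the second video at an x offset equal to its own width, i.e. into the right half.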
command = '{ffmpeg} {input_videos_temp} -preset ultrafast -filter_complex vstack {output_video}'.format(
    ffmpeg=ffmpeg_path, input_videos_temp=all_input, output_video=final_video_temp)
This command stacks the row videos vertically, one on top of the other, to form the final grid.
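For the 2x2 case the printed command looks roughly like this (placeholder names again):
ffmpeg.exe -i F:/Multiple_View/temp_0.mov -i F:/Multiple_View/temp_1.mov -preset ultrafast -filter_complex vstack F:/Multiple_View/all_temp.mov
One caveat: vstack defaults to two inputs, so a grid with more than two rows would need -filter_complex vstack=inputs=N.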
command = '{ffmpeg} -t {duration} -i {final_video_temp} -i {audio} -shortest {output_video}'.format(
    ffmpeg=ffmpeg_path, duration=str(duration), final_video_temp=final_video_temp, audio=sound, output_video=output_video)
This last command muxes the audio into the final composited video. The key flag is -shortest: if the audio is longer than the video, the output is cut to the video's length.
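Expanded with the values from the __main__ example (350 frames at 24 fps is 14.583 seconds; the audio path is made up), it would look like:
ffmpeg.exe -t 14.583 -i F:/Multiple_View/all_temp.mov -i F:/Multiple_View/sound.wav -shortest F:/Multiple_View/all.mov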