Friday, July 30, 2010

OpenGL Intellivision Man

Not long ago I set out to turn an animated GIF I found of the Intellivision Running Man into an OpenGL-generated video clip. My goal was to shrink the frames down to tiny, 8-bit-style bitmaps and then use the resulting pixel data as positioning information in 3D space. Put another way: I wanted to render each pixel from the original images as a 3D block. Jump to the video to get a better idea of what I mean:

http://www.youtube.com/watch?v=E1rXt0l2pxY

Along the way I took a few turns. I never did reduce the image data to its smallest possible representation, but I did shrink each image to 24x24 pixels and convert it to grayscale. Using the handy PIL library, I was able to pull in the bitmap data as a tuple of pixel values. I then used each "pixel" to offset the drawing position in pyglet's on_draw handler.
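
To make the core idea concrete before the full listing, here's a distilled sketch of that step (not the original program; the filename and the printout are just for illustration): read a 24x24 grayscale bitmap with PIL and treat each dark pixel as a grid position where a block belongs.

from PIL import Image

pixels = Image.open('data/1.bmp').getdata()    # flat sequence of grayscale pixel values
for idx, value in enumerate(pixels):
    col, row = idx % 24, idx // 24             # recover (column, row) from the flat index
    if value < 100:                            # dark pixel: part of the running man
        print 'block at', (col, row)           # the real code translates and draws a cube here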

The code I used to render the animation is a hack of an example found here:

http://code.google.com/p/pyglet-hene/source/browse/trunk/

This is a fairly ugly hack, meaning I didn't work to make the code beautiful. It's just the original code, chopped, altered and enhanced as needed to accomplish what I wanted to see rendered on the screen.

Though the images are not provided here, the code needs a directory named "data" containing a series of bitmaps. My directory contains this:


ls -ltr
total 32
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 9.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 8.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 6.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 5.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 4.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 3.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 2.bmp
-rw-r--r-- 1 dvenable dvenable 1654 2010-07-20 14:06 1.bmp


If you're inclined to try your own little experiment, find a favorite animated GIF on the web, extract each frame, reduce its size, convert it to grayscale, and drop your own images into a directory like the one I created above (a sketch of one approach follows). Your animation should come out similar to mine.
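
If you'd rather script that preparation step than do it by hand, here's a minimal sketch using PIL, assuming PIL can step cleanly through your GIF's frames (the input filename is hypothetical; I did my own extraction with ffmpeg, as described in a post below):

import os
from PIL import Image

src = Image.open('runningman.gif')               # hypothetical animated GIF
if not os.path.isdir('data'):
    os.mkdir('data')

frame = 0
try:
    while True:
        src.seek(frame)                              # advance to the next frame
        small = src.convert('L').resize((24, 24))    # grayscale, shrunk to 24x24
        small.save(os.path.join('data', '%d.bmp' % (frame + 1)))
        frame += 1
except EOFError:
    pass                                             # PIL signals the end of the GIF this way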

And here's the complete source:


from pyglet.gl import *
import pyglet
from pyglet.window import *
from pyglet import image
import os
from PIL import Image
import glob
from math import sin, cos

window = pyglet.window.Window(width=640, height=480, resizable=True)

y=0.0
x=-10.0
z=10.0
xspeed = 0.5
yspeed = 0.0
lx=ly=0
lz=-2
angle=ratio=0.0

boxcol = [[1.0, 0.0, 0.0],   # bright: red
          [1.0, 0.5, 0.0],   # orange
          [1.0, 1.0, 0.0],   # yellow
          [0.0, 1.0, 0.0],   # green
          [0.0, 1.0, 1.0]]   # blue

# Dark: red, orange, yellow, green, blue
topcol = [[0.6, 0.0, 0.0],
          [0.6, 0.25, 0.0],
          [0.6, 0.6, 0.0],
          [0.0, 0.6, 0.0],
          [0.0, 0.6, 0.6]]



box = None      # display list storage
top = None      # display list storage

yloop = None    # loop counter for the y axis
xloop = None    # loop counter for the x axis

bmpdata = None
nextimg = 0
files = None

def load_image_data():
    global bmpdata, bmpdatalen, files

    files = glob.glob('data/*.bmp')
    files.sort()
    bmpdata = map(lambda x: Image.open(x).getdata(), files)
    bmpdatalen = len(bmpdata)


def build_lists():
    global box, top
    box = glGenLists(2)

    glNewList(box, GL_COMPILE)  # new compiled box display list

    # draw the box without the top (it is stored in the display list
    # and will not appear on the screen yet)
    glBegin(GL_QUADS)

    # front face
    glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)
    glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 1.0)
    glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 1.0)
    glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0)
    # back face
    glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0)
    glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0)
    glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -1.0)
    glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -1.0)
    # right face
    glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, -1.0)
    glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0)
    glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, 1.0)
    glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0)
    # left face
    glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0)
    glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)
    glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0)
    glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0)
    glEnd()

    glEndList()  # done building the box list

    top = box + 1

    glNewList(top, GL_COMPILE)  # new compiled top display list
    glBegin(GL_QUADS)
    # top face
    glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0)
    glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0)
    glTexCoord2f(1.0, 0.0); glVertex3f(1.0, 1.0, 1.0)
    glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0)
    # bottom face
    glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0)
    glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0)
    glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0)
    glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0)
    glEnd()
    glEndList()



def load_gl_textures():
    # load the current bitmap and convert it to a texture
    global texture, texture_file, texture_surf
    #texture_file = os.path.join('data', 'cube.bmp')
    texture_file = files[nextimg]
    texture_surf = image.load(texture_file)
    texture = texture_surf.get_texture()
    glBindTexture(GL_TEXTURE_2D, texture.id)

    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)
    glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)



def init():
    """
    Pyglet examples often call this setup()
    """
    glEnable(GL_TEXTURE_2D)

    load_image_data()
    load_gl_textures()
    build_lists()

    glShadeModel(GL_SMOOTH)             # enable smooth shading
    glClearColor(0.0, 0.0, 0.0, 0.0)    # black background

    glClearDepth(1.0)                   # depth buffer setup
    glEnable(GL_DEPTH_TEST)             # enable depth testing
    glDepthFunc(GL_LEQUAL)              # the type of depth test to do

    glEnable(GL_LIGHT0)                 # quick and dirty lighting

    #glEnable(GL_LIGHTING)              # enable lighting
    glEnable(GL_COLOR_MATERIAL)         # enable coloring

    glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST)  # really nice perspective calculations



@window.event
def on_draw():
    global nextimg, bmpdata, x, y, z, lx, ly, lz

    # here we do all the drawing
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)

    # select the texture for the current frame
    load_gl_textures()
    #glBindTexture(GL_TEXTURE_2D, texture.id)

    xloop = 1
    yloop = 1

    mandata = bmpdata[nextimg]
    for idx in range(0, len(mandata)):
        # walk the flat pixel data as a 24-pixel-wide grid
        if (idx + 1) % 24 == 0:
            yloop += 1
            xloop = 1
        else:
            xloop += 1
        if mandata[idx] < 100:
            # dark pixel: draw a cube at this grid position
            glLoadIdentity()  # reset our view
            gluLookAt(x, y, z, x + lx, y + ly, z + lz, 0.0, 1.0, 0.0)

            glTranslatef(xloop * 1.8 - 30,
                         28 - yloop * 2.4,
                         -60.0)
            glColor3f(*boxcol[xloop % 4])  # select a box color
            glCallList(box)                # draw the box

            glColor3f(*topcol[1])
            glCallList(top)                # draw the top

    return pyglet.event.EVENT_HANDLED

def moveMeFlat(direction):
    global x, z, y, lx, lz, ly
    x = x - direction * lx * 0.75
    y = y + direction * ly * 0.5
    z = z + direction * lz * 0.5

def orientMe(ang):
    global lx, lz
    lx = sin(ang)
    lz = -cos(ang)


def update(dt):
    global z, angle
    angle += 0.005
    orientMe(angle)
    moveMeFlat(0.5)

def update2(dt):
    global nextimg
    # advance to the next bitmap, wrapping back to the first
    if nextimg < bmpdatalen - 1:
        nextimg += 1
    else:
        nextimg = 0


pyglet.clock.schedule_interval(update2, .1)
pyglet.clock.schedule(update)

@window.event
def on_resize(width, height):
    if height == 0:
        height = 1
    glViewport(0, 0, width, height)
    glMatrixMode(GL_PROJECTION)
    glLoadIdentity()

    # calculate the aspect ratio of the window
    gluPerspective(45.0, 1.0 * width / height, 0.1, 100.0)

    glMatrixMode(GL_MODELVIEW)
    glLoadIdentity()
    return pyglet.event.EVENT_HANDLED

init()

pyglet.app.run()

Tuesday, July 20, 2010

Cross-compile ActiveMQ-cpp on Centos 5

To date an RPM for activemq-cpp is not available for CentOS 5. I found RPMs in the Fedora 13 and 14 repositories, but due to their dependencies on newer libraries, they won't install.

Building for a 32-bit architecture on 64-bit CentOS 5 was a bit tricky. Here's the recipe that worked for me:

Install dependencies:


yum install expat-devel zlib-devel uuid-c++-devel openssl-devel


Download sources from here or other mirrors:


wget http://www.carfab.com/apachesoftware/activemq/activemq-cpp/source/activemq-cpp-library-3.2.1-src.tar.gz
wget http://mirror.candidhosting.com/pub/apache/apr/apr-1.4.2.tar.gz


Extract the archives and build APR first. Note PKG_CONFIG_PATH: it's one of the keys to ensuring that lib64 libraries are not picked up during the link step.


[root@myserver] cd apr-1.4.2
[root@myserver] ./configure --prefix=/usr --libdir=/usr/lib CXXFLAGS="-m32" LDFLAGS="-m32" CFLAGS="-m32" --build=i686-redhat-linux-gnu PKG_CONFIG_PATH=/usr/lib/pkgconfig
[root@myserver] make
[root@myserver] make install


Next build and install ActiveMQ-cpp:


[root@myserver] cd ../activemq-cpp-library-3.2.1
[root@myserver] ./configure --prefix=/usr --libdir=/usr/lib CXXFLAGS="-m32" LDFLAGS="-m32" CFLAGS="-m32" --build=i686-redhat-linux-gnu PKG_CONFIG_PATH=/usr/lib/pkgconfig --with-apr=../apr-1.4.2/apr-1-config
[root@myserver] make
[root@myserver] make install

Tuesday, July 13, 2010

qemu-img

qemu-img is a handy tool for converting one virtual machine image format into another.


qemu-img convert -O vdi windowsxp.img windowsxp.vdi


I needed it after running into errors creating a virtual machine with virt-manager. KVM managed by virt-manager has been my recent virtualization solution of choice, that is, until last Friday when I hit a snag creating a new virtual machine for Windows XP. The installation process got stuck and I wasn't able to recover. Following advice from somewhere in the net-realm, I opted to create the image with this easy command-line two-liner.


dd if=/dev/zero of=vm8.img bs=2048k count=12000
kvm -m 512 -cdrom winxp.iso -boot d vm8.img


The installation was successful and running the image from the command line using kvm was a snap, but what I really wanted was to place this image under the control of virt-manager.

I couldn't find a nice option to import an existing image; if one exists, it isn't very intuitive. Apparently, to place the image under virt-manager's control I would need to create an XML file under /etc/libvirt/qemu/ and probably also convert the image from raw format to qcow2. After messing around with this for too long, a colleague recommended I use VirtualBox.

I had used VirtualBox in the past, so I figured a different approach might be worthwhile and should get me past the headache in the short term. But what about the work I had done creating the original image? Installing Windows XP is a hassle, not to mention the effort I'd put into installing subsequent software on the image.

And that's where qemu-img came in. VirtualBox wants VDI-formatted images, so I converted my raw image to VDI.

Updated: FAIL

Importing the new VDI was easy enough, but once I created the new virtual machine and started it... I got this error from VirtualBox OSE:

"Failed to start the virtual machine windowsxp. VirtualBox can't operate in VMX root mode. Please disable the KVM kernel extension, recompile your kernel and reboot."

Okay...but I will silently resent you.

Updated: Original image hung after removing KVM. At this point I'm going to throw up my hands and create a new image and fresh install. I'm at the point of diminishing returns...

Wednesday, July 07, 2010

Going really 8-bit

Back when I was a kid, my nerd friends and I would create graphics for our Commodore computers by filling in squares on graph paper and calculating the bitmaps by hand. A single character on the Commodore 64 and VIC-20 was an 8x8 grid.

I recently found an 8-bit image fondly remembered from my youth, the Intellivision Running Man, in animated GIF format. I converted the GIF to a short video, which I embedded in a video project.

Some of my video conversion notes were covered in another post and some are on my wiki.

I want to extract the Intellivision Running Man bitmaps for use in an OpenGL project that I'm thinking about doing. Though it would probably be simpler to just sit down with pen and paper to graph and calculate the bitmaps, I'm hoping to find tools that will let me extract the data directly from raw video.

I first needed to reduce the screen resolution as much as possible by shrinking the video and dropping frames. Here I shrank the video output to 24x24 and reduced the frame rate to 4 frames per second.

ffmpeg -i intel.avi -r 4 -s 24x24 intel8bit.avi

Next I extracted individual bitmap images:

ffmpeg -i intel8bit.avi -f image2 foo_%5d.bmp

I initially took one image into GIMP and used Desaturate to convert it to grayscale, then used Levels to reduce the image to pure black or white. But I didn't want to repeat this again and again, so I wrote a small Python script that achieves the same goal, and along the way got to play with the PIL library, which I hadn't used before.


from PIL import Image
import sys, os

def bw(pt):
    if pt > 126:
        return 255
    else:
        return 0

for infile in sys.argv[1:]:
    f, e = os.path.splitext(infile)
    outfile = f + "_mod" + e

    im = Image.open(infile)

    im = im.convert('L')
    out = im.point(bw)

    out.save(outfile)
    print outfile


This little script loads a bitmap, then uses convert() with the 'L' option to make it grayscale. (I couldn't find a comprehensive list of mode options for convert; the documentation could use some work.)

Finally, I used the point() method, which passes each pixel in the bitmap data to a custom function. Your function can do anything; mine just looks at the value and makes the lighter grays absolute white and the darker grays absolute black. point() returns a copy of the image with the transformed result (headers updated and intact when it's written back out as a bitmap), which is then saved.
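
Incidentally, the same desaturate-and-threshold step can be written inline with a lambda; this is just an equivalent sketch of what the script above does, using a filename from my earlier ffmpeg run:

from PIL import Image

# grayscale, then map anything brighter than 126 to white and the rest to black
out = Image.open('foo_00041.bmp').convert('L').point(lambda p: 255 if p > 126 else 0)
out.save('foo_00041_mod.bmp')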

At this point I have true black and white bitmaps.

I wanted to quickly visualize my bitmaps, to get a feel for how much more I want to reduce their size. This little script renders an image to the terminal as a grid of X's (white pixels print an X, black pixels print a blank):


from PIL import Image
import sys, os


for infile in sys.argv[1:]:

    im = Image.open(infile)
    data = im.getdata()
    for idx in range(0, len(data)):
        i = data[idx]
        if i == 255:
            print 'X',
        else:
            print ' ',

        if (idx + 1) % 24 == 0:
            print ''



The output looks like this:


devin@studio:~/src/py$ python showdata.py /home/devin/Video/bitmaps/foo_00041_mod.bmp

X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X
X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X
X X X X X X X X X X X X X X X X X X X X X X X X