光线追踪是一种生成高质量图像的技术,通过模拟光线与物体之间的交互来生成逼真的光影效果。在这篇博客中,我们将使用Python来实现一个简单的光线追踪算法,生成一个具有光影效果的三维场景。本文将带你一步步实现这一效果,并展示如何使用Python编程实现光线追踪。
在开始之前,你需要确保你的系统已经安装了以下库:NumPy(用于数值计算)和 Pillow(用于图像生成)。
如果你还没有安装这些库,可以使用以下命令进行安装:
pip install numpy pillow
我们首先需要导入 NumPy 和 Pillow 库:
import numpy as np
from PIL import Image
我们定义一个函数来处理光线追踪的主要逻辑:
def normalize(v):
    """Return *v* scaled to unit length; a zero vector is returned unchanged."""
    length = np.linalg.norm(v)
    return v if length == 0 else v / length
def intersect_sphere(origin, direction, sphere):
    """Intersect a ray with a sphere.

    Solves |origin + t*direction - center|^2 = radius^2 for t.

    Parameters:
        origin: np.ndarray, ray starting point.
        direction: np.ndarray, ray direction (need not be normalized).
        sphere: dict with 'center' (np.ndarray) and 'radius' (number).

    Returns:
        (True, t) for the nearest intersection in front of the origin
        (t > 0), otherwise (False, None).
    """
    oc = origin - sphere['center']
    a = np.dot(direction, direction)
    b = 2.0 * np.dot(oc, direction)
    c = np.dot(oc, oc) - sphere['radius']**2
    discriminant = b**2 - 4*a*c
    if discriminant < 0:
        return False, None
    sqrt_d = np.sqrt(discriminant)
    # Prefer the nearer root; fall back to the farther one so a ray starting
    # inside the sphere still hits. The original returned the near root even
    # when negative (intersection behind the origin), which made spheres
    # behind the camera register as hits.
    for t in ((-b - sqrt_d) / (2.0 * a), (-b + sqrt_d) / (2.0 * a)):
        if t > 1e-6:
            return True, t
    return False, None
def ray_trace(origin, direction, spheres):
    """Trace one ray and return its RGB color as a numpy array.

    Finds the nearest sphere hit by the ray and shades it with simple
    Lambertian (diffuse) lighting from a fixed directional light; rays
    that miss everything yield black.
    """
    nearest = None
    nearest_t = float('inf')
    for candidate in spheres:
        was_hit, dist = intersect_sphere(origin, direction, candidate)
        if was_hit and dist < nearest_t:
            nearest_t = dist
            nearest = candidate
    if nearest is None:
        # No sphere hit: background color (black).
        return np.array([0, 0, 0])
    point = origin + nearest_t * direction
    surface_normal = normalize(point - nearest['center'])
    light_dir = normalize(np.array([1, 1, -1]))
    # Diffuse term, clamped at zero for faces turned away from the light.
    brightness = max(np.dot(surface_normal, light_dir), 0)
    return brightness * nearest['color']
我们定义场景中的球体及其属性,然后进行光线追踪并渲染图像:
# Scene and camera setup.
width, height = 800, 600
camera = np.array([0, 0, 1])       # eye position
viewport = np.array([2, 1.5, 1])   # viewport width, height, and depth

image = Image.new("RGB", (width, height))
pixels = image.load()

# Three colored spheres placed in front of the camera.
spheres = [
    {'center': np.array([0, 0, -5]), 'radius': 1, 'color': np.array([255, 0, 0])},
    {'center': np.array([-2, 1, -6]), 'radius': 1, 'color': np.array([0, 255, 0])},
    {'center': np.array([2, 1, -6]), 'radius': 1, 'color': np.array([0, 0, 255])},
]

# Shoot one ray per pixel through the viewport and shade it.
for y in range(height):
    for x in range(width):
        vx = (x / width) * viewport[0] - viewport[0] / 2
        vy = -(y / height) * viewport[1] + viewport[1] / 2
        ray_dir = normalize(np.array([vx, vy, -viewport[2]]) - camera)
        shade = ray_trace(camera, ray_dir, spheres)
        pixels[x, y] = tuple(shade.astype(int))

image.show()
import numpy as np
from PIL import Image
def normalize(v):
    """Return *v* scaled to unit length; a zero vector is returned unchanged."""
    length = np.linalg.norm(v)
    return v if length == 0 else v / length
def intersect_sphere(origin, direction, sphere):
    """Intersect a ray with a sphere.

    Solves |origin + t*direction - center|^2 = radius^2 for t.

    Parameters:
        origin: np.ndarray, ray starting point.
        direction: np.ndarray, ray direction (need not be normalized).
        sphere: dict with 'center' (np.ndarray) and 'radius' (number).

    Returns:
        (True, t) for the nearest intersection in front of the origin
        (t > 0), otherwise (False, None).
    """
    oc = origin - sphere['center']
    a = np.dot(direction, direction)
    b = 2.0 * np.dot(oc, direction)
    c = np.dot(oc, oc) - sphere['radius']**2
    discriminant = b**2 - 4*a*c
    if discriminant < 0:
        return False, None
    sqrt_d = np.sqrt(discriminant)
    # Prefer the nearer root; fall back to the farther one so a ray starting
    # inside the sphere still hits. The original returned the near root even
    # when negative (intersection behind the origin), which made spheres
    # behind the camera register as hits.
    for t in ((-b - sqrt_d) / (2.0 * a), (-b + sqrt_d) / (2.0 * a)):
        if t > 1e-6:
            return True, t
    return False, None
def ray_trace(origin, direction, spheres):
    """Trace one ray and return its RGB color as a numpy array.

    Finds the nearest sphere hit by the ray and shades it with simple
    Lambertian (diffuse) lighting from a fixed directional light; rays
    that miss everything yield black.
    """
    nearest = None
    nearest_t = float('inf')
    for candidate in spheres:
        was_hit, dist = intersect_sphere(origin, direction, candidate)
        if was_hit and dist < nearest_t:
            nearest_t = dist
            nearest = candidate
    if nearest is None:
        # No sphere hit: background color (black).
        return np.array([0, 0, 0])
    point = origin + nearest_t * direction
    surface_normal = normalize(point - nearest['center'])
    light_dir = normalize(np.array([1, 1, -1]))
    # Diffuse term, clamped at zero for faces turned away from the light.
    brightness = max(np.dot(surface_normal, light_dir), 0)
    return brightness * nearest['color']
# Scene and camera setup.
width, height = 800, 600
camera = np.array([0, 0, 1])       # eye position
viewport = np.array([2, 1.5, 1])   # viewport width, height, and depth

image = Image.new("RGB", (width, height))
pixels = image.load()

# Three colored spheres placed in front of the camera.
spheres = [
    {'center': np.array([0, 0, -5]), 'radius': 1, 'color': np.array([255, 0, 0])},
    {'center': np.array([-2, 1, -6]), 'radius': 1, 'color': np.array([0, 255, 0])},
    {'center': np.array([2, 1, -6]), 'radius': 1, 'color': np.array([0, 0, 255])},
]

# Shoot one ray per pixel through the viewport and shade it.
for y in range(height):
    for x in range(width):
        vx = (x / width) * viewport[0] - viewport[0] / 2
        vy = -(y / height) * viewport[1] + viewport[1] / 2
        ray_dir = normalize(np.array([vx, vy, -viewport[2]]) - camera)
        shade = ray_trace(camera, ray_dir, spheres)
        pixels[x, y] = tuple(shade.astype(int))

image.show()