Skip to content

Not able to spawn sensor #8857

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Open
IAMHAADICOOL opened this issue Apr 21, 2025 · 0 comments
Open

Not able to spawn sensor #8857

IAMHAADICOOL opened this issue Apr 21, 2025 · 0 comments

Comments

@IAMHAADICOOL
Copy link

Setup
Describe the setup you are using to run CARLA along with its version:

  • CARLA version: 0.9.15
  • Platform: Ubuntu 22.04
  • Python version: 3.9.21
  • GPU: NVIDIA RTX 4070 Laptop
  • GPU Drivers: 550.21

Describe the bug
I am using the following `env.py`:
from env import routes

import random
import re
import math
import os
import queue
import numpy as np
import matplotlib.pyplot as plt
import time
import carla
from carla_agents.navigation.basic_agent import BasicAgent
from carla_agents.navigation.local_planner import RoadOption
from carla_agents.navigation.global_route_planner import GlobalRoutePlanner


class CarlaEnv:
    """Synchronous CARLA driving environment with a three-camera RGB observation.

    Observations are a (9, image_h, image_w) stack of left/front/right camera
    frames; actions are (throttle, steer, brake). High-level navigation
    commands (follow / left / right, one-hot) come from a BasicAgent whose
    global plan is traced to a per-route destination.
    """

    def __init__(self, world='Town02', fps=10, image_w=256, image_h=112,
                 evaluate=False, on_test_set=False, eval_image_w=720, eval_image_h=480):
        """Connect to a CARLA server on localhost:2000, load ``world``,
        switch to synchronous mode at ``fps``, and precompute per-route
        remaining-distance tables.

        Args:
            world: name of the CARLA map to load.
            fps: simulation rate; ``fixed_delta_seconds`` becomes ``1 / fps``.
            image_w, image_h: per-camera observation resolution.
            evaluate: when True, longer episodes and a chase camera whose
                frames are saved to disk.
            on_test_set: select test routes instead of training routes.
            eval_image_w, eval_image_h: chase-camera recording resolution.
        """

        self.image_w = image_w
        self.image_h = image_h
        self.evaluate = evaluate
        self.eval_image_w = eval_image_w
        self.eval_image_h = eval_image_h

        # episode variables
        # NOTE(review): episode_number starts at -2, so the first two resets
        # appear to be treated as warm-up (recording is skipped while it is
        # negative) — confirm against the training loop.
        self.max_episode_steps = 150 if not evaluate else 250
        self.episode_number = -2
        self.obs_number = 0
        self.current_route = 0

        # spawn and destination location (parallel lists indexed by route)
        self.on_test_set = on_test_set
        if not on_test_set:
            self.spawns = routes.train_spawns
            self.rotations = routes.train_rotations
            self.dests = routes.train_dests
        else:
            self.spawns = routes.test_spawns
            self.rotations = routes.test_rotations
            self.dests = routes.test_dests

        # input/output dimension (3 RGB cameras and 3 actions)
        self.observation_space = (9, image_h, image_w)
        self.action_space = (3,)

        # connecting to carla
        self.client = carla.Client('localhost', 2000)
        self.client.set_timeout(5.0)

        # setting the world up
        self.world = self.client.load_world(world)
        print('carla connected.')
        # NOTE(review): this tick happens before synchronous mode is enabled
        # below — confirm it is intentional.
        self.world.tick()
        # set weather: enumerate the WeatherParameters presets (regex splits
        # CamelCase names into words for display) and pick preset index 6 of
        # the alphabetically-sorted list.
        rgx = re.compile('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)')
        name = lambda x: ' '.join(m.group(0) for m in rgx.finditer(x))
        presets = [x for x in dir(carla.WeatherParameters) if re.match('[A-Z].+', x)]
        presets = [(getattr(carla.WeatherParameters, x), name(x)) for x in presets]
        self.world.set_weather(presets[6][0])

        # fps settings: synchronous mode so the client controls stepping
        settings = self.world.get_settings()
        settings.synchronous_mode = True
        settings.fixed_delta_seconds = 1 / fps
        self.world.apply_settings(settings)
        print("Reached line 68 in env.py")
        # calculate the route to destination (to calculate distance)
        world_map = self.world.get_map()
        planner = GlobalRoutePlanner(world_map, sampling_resolution=1.0)
        print("Reached line 72 in env.py")
        route = []
        for i in range(len(self.spawns)):
            print("Reached line 75 in env.py")
            # snap spawn/destination locations to the nearest road waypoints
            start_waypoint = world_map.get_waypoint(self.spawns[i])
            end_waypoint = world_map.get_waypoint(self.dests[i])
            route.append(planner.trace_route(
                start_waypoint.transform.location, end_waypoint.transform.location))

        # calculate distance to destination for every point along the route:
        # first store segment lengths, then suffix-sum them in reverse so
        # distances[i][j] is the remaining road distance from point j.
        self.distances = [[] for _ in range(len(self.spawns))]

        for i in range(len(self.spawns)):
            for j in range(len(route[i]) - 1):
                p1 = route[i][j][0].transform.location
                p2 = route[i][j+1][0].transform.location
                distance = math.sqrt((p1.x - p2.x) ** 2 + (p1.y - p2.y) ** 2)
                self.distances[i].append(distance)
            self.distances[i].append(0)  # destination itself: 0 remaining

            for j in reversed(range(len(route[i]) - 1)):
                self.distances[i][j] += self.distances[i][j+1]

        # recording directory (only used when evaluating)
        if self.evaluate:
            main_dir = 'agent_rollout'
            if not os.path.exists(main_dir):
                os.makedirs(main_dir)
            self.dir = os.path.join(main_dir, 'train' if not self.on_test_set else 'test')

        # sensor variables (populated in reset())
        self.image_queues = [queue.Queue(), queue.Queue(), queue.Queue()]
        self.eval_image_queue = queue.Queue()
        self.vehicle = None
        self.agent = None
        self.cameras = []
        self.eval_camera = None
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.early_terminate = False  # set by the collision sensor callback
        self.invaded = False          # set by the lane-invasion callback
        print("Reached line 113 in env.py")

    def reset(self, route=None):
        """Destroy any previous actors, respawn the vehicle and sensors on a
        route, and return the first (obs, command, speed) tuple.

        Args:
            route: optional route index; when None the route cycles with the
                episode number.
        """
        print("Reached line 116 in env.py")
        self.episode_number += 1
        self.obs_number = 0
        self.early_terminate = False
        self.invaded = False

        # choose a route based on episode number or the route passed as an argument
        if route is None:
            self.current_route = self.episode_number % len(self.spawns)
        else:
            self.current_route = route
        print("Reached line 127 in env.py")
        # deleting vehicle and sensors (if they already exist); fresh queues
        # drop any stale frames from the previous episode
        self.image_queues = [queue.Queue(), queue.Queue(), queue.Queue()]
        self.destroy_sensors()
        self.cameras = []

        # spawning vehicle
        blueprint_lib = self.world.get_blueprint_library()
        vehicle_bp = blueprint_lib.filter('model3')[0]
        vehicle_spawn_point = carla.Transform(self.spawns[self.current_route], self.rotations[self.current_route])
        self.vehicle = self.world.spawn_actor(vehicle_bp, vehicle_spawn_point)

        # collision sensor: any collision ends the episode early
        self.collision_sensor = self.world.spawn_actor(
            blueprint_lib.find('sensor.other.collision'), carla.Transform(), attach_to=self.vehicle)
        self.collision_sensor.listen(lambda event: self.terminate())
        print("Collision sensor spawned. Ticking world...")
        self.world.tick()
        print("Tick after collision sensor OK.")

        print("Reached line 144 in env.py")
        # lane invasion sensor: invasions are penalized, not terminal
        self.lane_invasion_sensor = self.world.spawn_actor(
            blueprint_lib.find('sensor.other.lane_invasion'), carla.Transform(), attach_to=self.vehicle)
        self.lane_invasion_sensor.listen(lambda event: self.invade())
        print("Lane invasion sensor spawned. Ticking world...")
        self.world.tick()
        print("Tick after lane invasion sensor OK.")

        # setting up main cameras; positions are scaled by the vehicle's
        # bounding-box extents so they sit just off the bodywork
        bound_x = self.vehicle.bounding_box.extent.x
        bound_y = self.vehicle.bounding_box.extent.y
        bound_z = self.vehicle.bounding_box.extent.z

        print("Reached line 155 in env.py")
        # left camera (wide 120-deg FOV, angled back-left and down)
        camera_bp = blueprint_lib.find('sensor.camera.rgb')
        camera_bp.set_attribute('image_size_x', f'{self.image_w}')
        camera_bp.set_attribute('image_size_y', f'{self.image_h}')
        camera_bp.set_attribute('fov', str(120))
        camera_spawn_point = carla.Transform(
            carla.Location(x=-0.2 * bound_x, y=-bound_y, z=0.4 * (0.5 + bound_z)),
            carla.Rotation(yaw=295, pitch=-20))
        self.cameras.append(self.world.spawn_actor(camera_bp, camera_spawn_point, attach_to=self.vehicle,
                                                   attachment_type=carla.AttachmentType.Rigid))
        self.cameras[0].listen(self.image_queues[0].put)
        print("LEFT camera spawned. Ticking world...")
        self.world.tick()  # debug: suspected crash point when spawning multiple cameras
        print("Tick after LEFT camera OK.")

        # front camera (default FOV, ahead of the hood, pitched down)
        camera_bp = blueprint_lib.find('sensor.camera.rgb')
        camera_bp.set_attribute('image_size_x', f'{self.image_w}')
        camera_bp.set_attribute('image_size_y', f'{self.image_h}')
        camera_spawn_point = carla.Transform(
            carla.Location(x=0.5 + bound_x, y=0.0, z=0.5 + bound_z),
            carla.Rotation(pitch=-15))
        self.cameras.append(self.world.spawn_actor(camera_bp, camera_spawn_point, attach_to=self.vehicle,
                                                   attachment_type=carla.AttachmentType.Rigid))
        self.cameras[1].listen(self.image_queues[1].put)
        print("FRONT camera spawned. Ticking world...")
        self.world.tick()
        print("Tick after FRONT camera OK.")

        # right camera (mirror of the left camera)
        camera_bp = blueprint_lib.find('sensor.camera.rgb')
        camera_bp.set_attribute('image_size_x', f'{self.image_w}')
        camera_bp.set_attribute('image_size_y', f'{self.image_h}')
        camera_bp.set_attribute('fov', str(120))
        camera_spawn_point = carla.Transform(
            carla.Location(x=-0.2 * bound_x, y=bound_y, z=0.4 * (0.5 + bound_z)),
            carla.Rotation(yaw=65, pitch=-20))
        right_camera = self.world.spawn_actor(camera_bp, camera_spawn_point, attach_to=self.vehicle,
                                                   attachment_type=carla.AttachmentType.Rigid)
        self.cameras.append(right_camera)
        self.cameras[2].listen(self.image_queues[2].put)
        print("RIGHT camera spawned. Ticking world...")
        self.world.tick()
        print("Tick after RIGHT camera OK.")


        print("Reached line 191 in env.py")
        # setting up evaluation camera (third-person chase view on a spring arm)
        if self.evaluate:
            camera_bp = blueprint_lib.find('sensor.camera.rgb')
            camera_bp.set_attribute('image_size_x', f'{self.eval_image_w}')
            camera_bp.set_attribute('image_size_y', f'{self.eval_image_h}')
            camera_spawn_point = carla.Transform(
                carla.Location(x=-2.0*(0.5+bound_x), y=0.0, z=2.0*(0.5+bound_z)),
                carla.Rotation(pitch=8.0)
            )
            self.eval_camera = self.world.spawn_actor(
                camera_bp,
                camera_spawn_point,
                attach_to=self.vehicle,
                attachment_type=carla.AttachmentType.SpringArm
            )

            # NOTE(review): this listener always enqueues frames, but the
            # queue is only drained below when episode_number != -1, so the
            # eval queue can accumulate frames on warm-up episodes — verify.
            self.eval_camera.listen(self.eval_image_queue.put)

        # episode record directory
        if self.evaluate and self.episode_number != -1:
            sub_dir = os.path.join(self.dir, f'ep_{self.episode_number}')
            if not os.path.exists(sub_dir):
                os.makedirs(sub_dir)

        print("Reached line 216 in env.py")
        # generate and process the first observation
        self.world.tick()

        # --- Simple loop: tick until all three camera queues have data ---
        # (in synchronous mode a sensor may need one or more ticks before its
        # first frame arrives)
        print("Waiting for initial sensor data (simple loop)...")
        # Continue looping as long as ANY queue is empty
        while self.image_queues[0].empty() or \
              self.image_queues[1].empty() or \
              self.image_queues[2].empty():
            # Tick the world to allow sensors to produce data
            print("  Ticking world, waiting for queues...")
            self.world.tick()
            # Tiny sleep to avoid busy-spinning if ticks return very fast
            time.sleep(0.001)
        print("All sensor queues have data now.")

        print("Reached line 219 in env.py")
        # stack left/front/right into a single (9, H, W) observation
        img_0 = self.process_image(self.image_queues[0].get(timeout=2))
        img_1 = self.process_image(self.image_queues[1].get(timeout=2))
        img_2 = self.process_image(self.image_queues[2].get(timeout=2))
        obs = np.vstack((img_0, img_1, img_2))
        print("Reached line 224 in env.py")
        if self.evaluate and self.episode_number != -1:
            image = self.process_image(self.eval_image_queue.get())
            self.save_image(image)

        # setup agent to provide high-level commands
        self.agent = BasicAgent(self.vehicle)
        self.agent.ignore_traffic_lights(active=False)
        self.agent.set_destination(self.dests[self.current_route])

        # initial command: move forward (one-hot: [left, follow, right])
        command = np.array([0.0, 1.0, 0.0])

        # vehicle speed
        speed = 0

        print("Reached line 235 in env.py")
        return obs, command, speed

    def step(self, action):
        """Apply one control action, tick the world, and return
        (obs, command, speed, reward, done, info).

        Args:
            action: array-like of (throttle, steer, brake); values are
                clipped to their valid ranges.
        """

        # add noise to steer (used for testing robustness of agent)
        # action[1] = 0
        # if random.random() < 0.25:
        #     action[1] += 0.2

        # clipping values
        # print("Reached line 246 in env.py")
        throttle = float(np.clip(action[0], 0, 1))
        steer = float(np.clip(action[1], -1, 1))
        brake = float(np.clip(action[2], 0, 1))
        if brake < 0.01:
            brake = 0

        # if self.obs_number < 50:
        #     throttle, steer, brake = 0, 0, 0

        # applying the action
        self.vehicle.apply_control(
            carla.VehicleControl(throttle=throttle, steer=steer, brake=brake))
        self.world.tick()

        # get the next observation
        img_0 = self.process_image(self.image_queues[0].get(timeout=2.0))
        img_1 = self.process_image(self.image_queues[1].get(timeout=2.0))
        img_2 = self.process_image(self.image_queues[2].get(timeout=2.0))
        obs = np.vstack((img_0, img_1, img_2))

        if self.evaluate:
            image = self.process_image(self.eval_image_queue.get())
            self.save_image(image)
        self.obs_number += 1

        # get high-level command from agent's global planner
        # NOTE(review): stock carla BasicAgent.run_step returns only a
        # VehicleControl; this unpacking implies a modified agent that also
        # returns the current RoadOption and progress index — confirm.
        _, road_option, num_points_done = self.agent.run_step()

        # NOTE(review): if road_option is any other value (e.g.
        # CHANGELANELEFT / CHANGELANERIGHT), `command` is never assigned and
        # the return below raises UnboundLocalError — consider a default.
        if road_option == RoadOption.LANEFOLLOW or road_option == RoadOption.STRAIGHT:
            command = np.array([0.0, 1.0, 0.0])
        elif road_option == RoadOption.LEFT:
            command = np.array([1.0, 0.0, 0.0])
        elif road_option == RoadOption.RIGHT:
            command = np.array([0.0, 0.0, 1.0])

        # calculate distance to destination (straight-line and along-road)
        location = self.vehicle.get_transform().location
        aerial_dist = math.sqrt(
            (self.dests[self.current_route].x - location.x) ** 2
            + (self.dests[self.current_route].y - location.y) ** 2)
        road_dist = self.distances[self.current_route][num_points_done]

        # check episode termination: step limit / collision, or arrival
        # within 5 m of the destination
        if self.obs_number == self.max_episode_steps or self.early_terminate:
            done = True
            info = {'distance': road_dist}
        elif aerial_dist < 5 or road_dist < 5:
            done = True
            info = {'distance': 0}
        else:
            done = False
            info = {}

        # environment reward: -5 on collision or lane invasion, else 0
        # NOTE(review): an earlier comment said -25 — confirm which penalty
        # is intended.
        if self.invaded or self.early_terminate:
            reward = -5
            self.invaded = False
        else:
            reward = 0

        # vehicle speed (magnitude of the velocity vector, m/s)
        v = self.vehicle.get_velocity()
        speed = math.sqrt(v.x ** 2 + v.y ** 2 + v.z ** 2)

        # print(self.obs_number, command, road_dist)
        print("Reached line 312 in env.py")
        return obs, command, speed, reward, done, info

    def destroy_sensors(self):
        """Stop all sensor listeners, batch-destroy sensors and the vehicle
        on the server, tick once so the destruction is processed, then clear
        the Python references. Safe to call when nothing has been spawned."""
        print("Destroying sensors and vehicle...")
        # Use a client instance for batch destruction if preferred, or keep individual
        client = self.client # Get client reference if needed

        actors_to_destroy = []
        sensors_to_stop = []

        # Collect sensors that need stopping/destroying
        all_sensors = [*self.cameras, self.eval_camera, self.collision_sensor, self.lane_invasion_sensor]
        for sensor in all_sensors:
            if sensor is not None:
                # Check if alive before adding to list (optional robustness)
                try:
                    if sensor.is_alive:
                        sensors_to_stop.append(sensor)
                        actors_to_destroy.append(sensor.id)
                    else: # Already destroyed or invalid?
                        print(f"Warning: Sensor {getattr(sensor, 'id', 'unknown')} found but not alive during destroy.")
                except Exception as e:
                    print(f"Warning: Error checking sensor state during destroy: {e}")


        # Stop listeners first (important!) so no callbacks fire mid-destroy
        print(f"Stopping {len(sensors_to_stop)} sensors...")
        for sensor in sensors_to_stop:
            try:
                if sensor.is_listening:
                    sensor.stop()
            except Exception as e:
                print(f"Warning: Error stopping sensor {sensor.id}: {e}")

        # Collect vehicle if alive
        if self.vehicle is not None:
            try:
                if self.vehicle.is_alive:
                    actors_to_destroy.append(self.vehicle.id)
                else:
                    print(f"Warning: Vehicle {self.vehicle.id} found but not alive during destroy.")
            except Exception as e:
                print(f"Warning: Error checking vehicle state during destroy: {e}")


        # Destroy actors using batch command (synchronous: waits for ack)
        if actors_to_destroy:
            print(f"Sending destroy commands for actors: {actors_to_destroy}")
            client.apply_batch_sync([carla.command.DestroyActor(actor_id) for actor_id in actors_to_destroy], True)
            print("Batch destroy command sent and acknowledged.")
        else:
            print("No live actors needed destruction.")

        # --- CRITICAL: Tick the world to process destruction ---
        print("Ticking world once to process destruction...")
        self.world.tick()
        # You could even add a second tick for good measure if needed
        # print("Ticking world second time after destruction...")
        # self.world.tick()
        print("Post-destruction tick complete.")
        # ---

        # Clear Python references AFTER destruction is processed
        self.cameras = []
        self.eval_camera = None
        self.collision_sensor = None
        self.lane_invasion_sensor = None
        self.vehicle = None
        print("Actor references cleared.")

    def close(self):
        """Tear down all spawned actors; the world is left as-is.

        NOTE(review): synchronous mode is not reverted here, so a server
        shared with other clients will remain paused between ticks — confirm
        this is acceptable.
        """
        self.destroy_sensors()
        print('carla disconnected.')

    def terminate(self):
        """Collision-sensor callback: flag the episode for early termination."""
        self.early_terminate = True

    def invade(self):
        """Lane-invasion-sensor callback: flag a penalty for the next step."""
        self.invaded = True

    def save_image(self, image):
        """Write one chase-camera frame (CHW array from process_image) to the
        current episode's record directory as a PNG."""
        plt.imsave(os.path.join(self.dir, f'ep_{self.episode_number}', f'obs_{self.obs_number:03}.png'),
                   image.transpose(1, 2, 0))

    @staticmethod
    def process_image(image):
        """Convert a carla.Image into a (3, H, W) uint8 RGB array.

        The raw buffer is BGRA; the alpha channel is dropped, channels are
        reversed to RGB, and the array is transposed to channel-first. The
        copy makes the result contiguous and independent of the raw buffer.
        """
        array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
        array = np.reshape(array, (image.height, image.width, 4))
        array = array[:, :, :3]
        array = array[:, :, ::-1]
        array = array.transpose((2, 0, 1))
        return array.copy()

and I am using the following rollout function for the PPO agent:
```python
# rollout for num_steps in the environment
def rollout(self, start_global_step):

    global_step = start_global_step

    for step in range(0, self.num_steps):

        global_step += 1
        self.obs[step] = self.next_obs
        self.commands[step] = self.next_command
        self.speeds[step] = self.next_speed
        self.dones[step] = self.next_done

        # action logic
        with torch.no_grad():
            action, logproba, _, value = self.get_action_and_value(
                self.obs[step].unsqueeze(0), self.commands[step].unsqueeze(0), self.speeds[step].unsqueeze(0))

        self.values[step] = value.view(-1)
        self.actions[step] = action.view(-1)
        self.logprobs[step] = logproba.view(-1)

        # execute the game and log data
        next_obs, next_command, next_speed, reward, done, info = self.env.step(action.view(-1).cpu().numpy())
        self.rewards[step] = torch.tensor(reward).to(self.device)
        self.next_obs = torch.Tensor(next_obs).to(self.device)
        self.next_command = torch.Tensor(next_command).to(self.device)
        self.next_speed = torch.Tensor([next_speed]).to(self.device)
        self.next_done = torch.tensor(int(done)).to(self.device)

        if self.next_done.item() == 1:
            obs, command, speed = self.env.reset()
            self.next_obs = torch.Tensor(obs).to(self.device)
            self.next_command = torch.Tensor(command).to(self.device)
            self.next_speed = torch.Tensor([speed]).to(self.device)

        if 'distance' in info.keys():
            self.writer.add_scalar('charts/distance', info['distance'], global_step)

    return global_step
```

The third (right) camera fails to spawn, and I have not been able to determine why despite everything I have tried.
Steps to reproduce
Explain the steps needed to reproduce the issue.

Expected behavior
A clear and concise description of what you expected to happen.

Logs
Paste the server and client logs. Server ones can be found at:

  • Unreal Engine 4: Unreal/CarlaUE4/Saved/Logs
  • Unreal Engine 5: Unreal/CarlaUnreal/Saved/Logs

Scripts
Add any scripts relevant for the reproducibility of the error.

Screenshots
Image

Additional context
Add any other context about the problem here.

Sign up for free to join this conversation on GitHub. Already have an account? Sign in to comment
Labels
None yet
Projects
None yet
Development

No branches or pull requests

1 participant