diff --git a/commands/shooter/alignandaim.py b/commands/shooter/alignandaim.py
index 15e5e41..9b8b5c4 100644
--- a/commands/shooter/alignandaim.py
+++ b/commands/shooter/alignandaim.py
@@ -75,7 +75,7 @@ def initialize(self):
     def calculateTimeVelocityAngle(
         self, position: Translation3d
     ) -> typing.Tuple[float, float, Rotation2d, Rotation2d, float]:
-        botPose = self.drive.estimator.getEstimatedPosition()
+        botPose = self.drive.estimator.estimatedPose
         target2d = position.toTranslation2d()
         deltaTranslation = botPose.translation() - target2d
 
@@ -109,7 +109,7 @@ def calculateTimeVelocityAngle(
         )
 
     def execute(self):
-        botPose = self.drive.estimator.getEstimatedPosition()
+        botPose = self.drive.estimator.estimatedPose
         robotVelocity = getSDArray(constants.kDriveVelocityKeys, [0, 0, 0])
         time, velocity, psi, theta, distance = 0, 0, Rotation2d(), Rotation2d(), 0
         target = self.targetPose.translation()
diff --git a/constants.py b/constants.py
index c6c9967..99d909f 100644
--- a/constants.py
+++ b/constants.py
@@ -479,6 +479,9 @@ kRobotToTagIdKey = "vision/ids"
 kRobotToTagAmbiguityKey = "vision/ambiguity"
 
+kXyStdDevCoeff = 0.005
+kThetaStdDevCoeff = 0.01
+
 kTargetName = "Target"
 
 kApriltagPositionDict = {  # thanks 6328 for FieldConstants!
diff --git a/physics.py b/physics.py
index 8a1a916..620538a 100644
--- a/physics.py
+++ b/physics.py
@@ -292,6 +292,7 @@ class PhysicsEngine:
 
     # pylint: disable-next=unused-argument
     def __init__(self, physics_controller: PhysicsInterface, robot: MentorBot):
+        assert robot.container is not None
        self.physics_controller = physics_controller
         self.bot = robot
 
diff --git a/subsystems/drivesubsystem.py b/subsystems/drivesubsystem.py
index 6ac9fb2..1c89097 100644
--- a/subsystems/drivesubsystem.py
+++ b/subsystems/drivesubsystem.py
@@ -1,6 +1,7 @@
 from enum import Enum, auto
 from functools import partial
+from math import sqrt
 from typing import Tuple
 import typing
 
 from commands2 import Subsystem
@@ -16,7 +17,15 @@
     DriverStation,
 )
 
-from wpimath.geometry import Pose2d, Rotation2d, Translation2d, Transform3d, Pose3d
+from wpimath.geometry import (
+    Pose2d,
+    Rotation2d,
+    Transform2d,
+    Translation2d,
+    Transform3d,
+    Pose3d,
+    Twist2d,
+)
 from wpimath.filter import SlewRateLimiter
 from wpimath.kinematics import (
     ChassisSpeeds,
@@ -25,7 +34,7 @@
     SwerveDrive4Odometry,
     SwerveModulePosition,
 )
-from wpimath.estimator import SwerveDrive4PoseEstimator
+from wpimath.interpolation import TimeInterpolatablePose2dBuffer
 
 from pathplannerlib.auto import AutoBuilder
 
@@ -323,18 +332,17 @@ def __init__(self, vision: VisionSubsystem) -> None:
         self.gyro.configurator.apply(toApply)
         self.gyro.get_yaw().set_update_frequency(100)
 
-        self.estimator = SwerveDrive4PoseEstimator(
+        self.estimator = RobotPoseEstimator(
             self.kinematics,
             self.getRotation(),
-            [
+            (
                 self.frontLeftModule.getPosition(),
                 self.frontRightModule.getPosition(),
                 self.backLeftModule.getPosition(),
                 self.backRightModule.getPosition(),
-            ],
+            ),
             Pose2d(),
-            [0.1, 0.1, 0.1],
-            [0.4, 0.4, 0.4],
+            (0.1, 0.1, 0.1),
         )
 
         # standard deviations stolen from 2910
@@ -408,7 +416,7 @@ def resetGyro(self, pose: Pose2d):
         # self.resetSimPosition(pose)
 
     def getPose(self) -> Pose2d:
-        translation = self.estimator.getEstimatedPosition().translation()
+        translation = self.estimator.estimatedPose.translation()
         rotation = self.getRotation()
         return Pose2d(translation, rotation)
 
@@ -520,22 +528,24 @@ def periodic(self):
         estimatedCameraPoses = self.vision.poseList
         hasTargets = False
 
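+        # push a timestamped odometry sample into the estimator every loop so its
+        # pose buffer can later be replayed against delayed vision measurements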
+        odoMeasure = OdometryObservation(
+            [*swervePositions], self.getRotation(), self.printTimer.getFPGATimestamp()
+        )
+        self.estimator.addOdometryMeasurement(odoMeasure)
+
         for estimatedCameraPose in estimatedCameraPoses:
             if estimatedCameraPose.hasTargets:
-                self.estimator.addVisionMeasurement(
+                visionMeasure = VisionObservation(
                     estimatedCameraPose.pose.toPose2d(),
                     estimatedCameraPose.timestamp,
+                    estimatedCameraPose.stdDev,
                 )
+                self.estimator.addVisionMeasurement(visionMeasure)
                 hasTargets = True
 
-        self.estimator.updateWithTime(
-            self.printTimer.getFPGATimestamp(),
-            self.odometry.getPose().rotation(),
-            [*swervePositions],
-        )
         self.vision.poseList.clear()
 
-        self.visionEstimate = self.estimator.getEstimatedPosition()
+        self.visionEstimate = self.estimator.estimatedPose
 
         # I swear there's an easier way to do this but I couldn't figure it out
         speakerDistance = (
@@ -676,3 +686,135 @@ def arcadeDriveWithSpeeds(
         moduleStates = self.kinematics.toSwerveModuleStates(robotChassisSpeeds)
 
         self.applyStates(moduleStates)
+
+
+# shamelessly reimplemented from 6328
+class OdometryObservation:
+    def __init__(
+        self,
+        wheelPositions: tuple[
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+        ],
+        gyroAngle: Rotation2d,
+        timestamp: float,
+    ) -> None:
+        self.wheelPositions = wheelPositions
+        self.gyroAngle = gyroAngle
+        self.timestamp = timestamp
+
+
+class VisionObservation:
+    def __init__(self, visionPose: Pose2d, timestamp: float, std: list[float]) -> None:
+        assert len(std) == 3
+        self.visionPose = visionPose
+        self.timestamp = timestamp
+        self.std = std
+
+
+class RobotPoseEstimator:
+    # pylint:disable-next=too-many-arguments, too-many-positional-arguments
+    def __init__(
+        self,
+        kinematics: SwerveDrive4Kinematics,
+        gyro: Rotation2d,
+        startWheelPositions: tuple[
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+        ],
+        startPose: Pose2d,
+        odoStdDevs: tuple[float, float, float],
+    ) -> None:
+        self.kinematics = kinematics
+        self.lastGyroAngle = gyro
+        self.lastWheelPositions = startWheelPositions
+        self.odometryPose = startPose
+        self.estimatedPose = startPose
+
+        self.odoStdDevs = odoStdDevs
+
+        self.poseBuffer = TimeInterpolatablePose2dBuffer(2.0)
+
+    def addOdometryMeasurement(self, measurement: OdometryObservation):
+        newPositions: list[SwerveModulePosition] = []
+        for old, new in zip(self.lastWheelPositions, measurement.wheelPositions):
+            newPositions.append(
+                SwerveModulePosition(new.distance - old.distance, new.angle)
+            )
+
+        twist = self.kinematics.toTwist2d(newPositions)
+        twist = Twist2d(
+            twist.dx, twist.dy, (measurement.gyroAngle - self.lastGyroAngle).radians()
+        )
+
+        self.odometryPose = self.odometryPose.exp(twist)
+        self.poseBuffer.addSample(measurement.timestamp, self.odometryPose)
+
+        self.estimatedPose = self.estimatedPose.exp(twist)
+
+        self.lastGyroAngle = measurement.gyroAngle
+        self.lastWheelPositions = measurement.wheelPositions
+
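+    # Vision fusion, 6328-style: rewind to the odometry pose recorded at the
+    # measurement timestamp, nudge it toward the vision pose with a per-axis
+    # Kalman gain, then replay the odometry motion since that timestamp.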
+    def addVisionMeasurement(self, measurement: VisionObservation):
+        # check if measurement is valid enough to work with
+        if self.poseBuffer.getInternalBuffer()[-1][0] - 2.0 > measurement.timestamp:
+            return
+
+        sample = self.poseBuffer.sample(measurement.timestamp)
+        if sample is None:
+            return
+
+        sampleToOdometryTransform = Transform2d(sample, self.odometryPose)
+        odometryToSampleTransform = Transform2d(self.odometryPose, sample)
+
+        estimateAtTime = self.estimatedPose + odometryToSampleTransform
+
+        # new vision matrix
+        r = [i * i for i in measurement.std]
+
+        # Solve for closed form Kalman gain for continuous Kalman filter with A = 0
+        # and C = I. See wpimath/algorithms.md.
+        visionK = [0.0, 0.0, 0.0]
+
+        for i in range(3):
+            stdDev = self.odoStdDevs[i]
+            if stdDev == 0.0:
+                visionK[i] = 0.0
+            else:
+                visionK[i] = stdDev / (stdDev + sqrt(stdDev * r[i]))
+
+        transform = Transform2d(estimateAtTime, measurement.visionPose)
+        kTimesTransform = [
+            visionK[i] * k
+            for i, k in enumerate(
+                [transform.X(), transform.Y(), transform.rotation().radians()]
+            )
+        ]
+
+        scaledTransform = Transform2d(
+            kTimesTransform[0], kTimesTransform[1], kTimesTransform[2]
+        )
+
+        self.estimatedPose = (
+            estimateAtTime + scaledTransform + sampleToOdometryTransform
+        )
+
+    def resetPosition(
+        self,
+        gyro: Rotation2d,
+        startWheelPositions: tuple[
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+            SwerveModulePosition,
+        ],
+        startPose: Pose2d,
+    ):
+        self.lastGyroAngle = gyro
+        self.estimatedPose = startPose
+        self.odometryPose = startPose
+        self.lastWheelPositions = startWheelPositions
diff --git a/subsystems/visionsubsystem.py b/subsystems/visionsubsystem.py
index 4fe30e4..f93ceea 100644
--- a/subsystems/visionsubsystem.py
+++ b/subsystems/visionsubsystem.py
@@ -1,5 +1,5 @@
 from collections import deque
-from math import hypot, sin, tan, atan
+from math import hypot, inf, sin, tan, atan
 
 # import numpy as np
 
@@ -25,34 +25,36 @@
 
 class EstimatedPose:
-    def __init__(self, pose: Pose3d, hasTargets: bool, timestamp: float):
+    def __init__(
+        self, pose: Pose3d, hasTargets: bool, timestamp: float, stdDev: list[float]
+    ):
         self.pose = pose
         self.hasTargets = hasTargets
         self.timestamp = timestamp
+        self.stdDev = stdDev
 
 
 class VisionCamera:
     # hi its landon here
+    cameraTransforms = {
+        "frontLeft": constants.kRobotToFrontLeftCameraTransform,
+        "frontRight": constants.kRobotToFrontRightCameraTransform,
+        "backLeft": constants.kRobotToBackLeftCameraTransform,
+        "backRight": constants.kRobotToBackRightCameraTransform,
+    }
+    cameraKeys = {
+        "frontLeft": constants.kPhotonvisionFrontLeftCameraKey,
+        "frontRight": constants.kPhotonvisionFrontRightCameraKey,
+        "backLeft": constants.kPhotonvisionBackLeftCameraKey,
+        "backRight": constants.kPhotonvisionBackRightCameraKey,
+    }
+
     def __init__(self, camera: PhotonCamera):
         self.camera = camera
         self.name = camera.getName()
 
-        cameraKeys = {
-            "frontLeft": constants.kPhotonvisionFrontLeftCameraKey,
-            "frontRight": constants.kPhotonvisionFrontRightCameraKey,
-            "backLeft": constants.kPhotonvisionBackLeftCameraKey,
-            "backRight": constants.kPhotonvisionBackRightCameraKey,
-        }
-
-        self.key = cameraKeys[self.name]
+        self.key = VisionCamera.cameraKeys[self.name]
 
-        cameraTransforms = {
-            "frontLeft": constants.kRobotToFrontLeftCameraTransform,
-            "frontRight": constants.kRobotToFrontRightCameraTransform,
-            "backLeft": constants.kRobotToBackLeftCameraTransform,
-            "backRight": constants.kRobotToBackRightCameraTransform,
-        }
-
-        self.cameraToRobotTransform = cameraTransforms[self.name].inverse()
+        self.cameraToRobotTransform = VisionCamera.cameraTransforms[self.name].inverse()
 
 
 class VisionSubsystemReal(Subsystem):
@@ -81,13 +83,8 @@ def __init__(self) -> None:
         # inst.setServer("localhost")
         # inst.startClient4("Robot Sim")
 
-    def periodic(self) -> None:
-        # self.estimatedPosition = self.drive.getPose()
-        # self.updateAdvantagescopePose()
-
-        visionPose = getSDArray(constants.kRobotVisionPoseArrayKeys.valueKey, [0, 0, 0])
+    def noteDetection(self) -> None:
         robotPose = getSDArray(constants.kRobotPoseArrayKeys.valueKey, [0, 0, 0])
-
         noteResult = self.noteCamera.getLatestResult()
         if noteResult.hasTargets():
             notes = noteResult.getTargets()
@@ -120,18 +117,28 @@ def periodic(self) -> None:
         else:
             SmartDashboard.putBoolean(constants.kNoteInViewKey.validKey, False)
 
+    def periodic(self) -> None:
+        visionPose = getSDArray(constants.kRobotVisionPoseArrayKeys.valueKey, [0, 0, 0])
+        robotPose = getSDArray(constants.kRobotPoseArrayKeys.valueKey, [0, 0, 0])
+
         combinedPose = pose3dFrom2d(Pose2d(visionPose[0], visionPose[1], robotPose[2]))
         self.robotToTags = []
 
         for camera in self.cameras:
             photonResult = camera.getLatestResult()
             hasTargets = len(photonResult.getTargets()) > 0
             multitagresult = photonResult.multiTagResult
+            useVisionRotation = True
             bestRelativeTransform = multitagresult.estimatedPose.best
+            avgDistance = bestRelativeTransform.translation().norm()
+            tagPoses = [bestRelativeTransform] * len(multitagresult.fiducialIDsUsed)
+
             currentCamera = VisionCamera(camera)
             ambiguity = 10
 
             if not multitagresult.estimatedPose.isPresent:
+                useVisionRotation = False
+                tagPoses: list[Transform3d] = []
                 for result in photonResult.targets:
                     # pylint:disable-next=consider-iterating-dictionary
                     if result.fiducialId in constants.kApriltagPositionDict.keys():
@@ -139,6 +146,7 @@ def periodic(self) -> None:
                             currentCamera.cameraToRobotTransform.inverse()
                             + result.bestCameraToTarget
                         )
+                        tagPoses.append(result.bestCameraToTarget)
                         self.robotToTags.append(
                             (botToTagPose, result.fiducialId, result.poseAmbiguity),
                         )
@@ -152,6 +160,14 @@ def periodic(self) -> None:
                             )
                            ambiguity = result.poseAmbiguity
 
+            if len(tagPoses) == 0:
+                continue
+            totalDistance = 0
+            for pose in tagPoses:
+                totalDistance += pose.translation().norm()
+
+            avgDistance = totalDistance / len(tagPoses)
+
             VisionSubsystemReal.updateAdvantagescopePose(
                 Pose3d() + bestRelativeTransform,
                 currentCamera.key,
@@ -163,9 +179,27 @@ def periodic(self) -> None:
                 Pose3d() + bestRelativeTransform + currentCamera.cameraToRobotTransform
             )
 
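+            # scale trust with distance: std devs grow with the square of the
+            # average tag distance and shrink with the number of tags in view;
+            # rotation is only trusted when a multi-tag estimate was available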
+            xyStdDev = (
+                constants.kXyStdDevCoeff * (avgDistance * avgDistance) / len(tagPoses)
+            )
+            thetaStdDev = (
+                (
+                    constants.kThetaStdDevCoeff
+                    * (avgDistance * avgDistance)
+                    / len(tagPoses)
+                )
+                if useVisionRotation
+                else inf
+            )
+
             if ambiguity < 10:
                 self.poseList.append(
-                    EstimatedPose(botPose, hasTargets, photonResult.getTimestamp())
+                    EstimatedPose(
+                        botPose,
+                        hasTargets,
+                        photonResult.getTimestamp(),
+                        [xyStdDev, xyStdDev, thetaStdDev],
+                    )
                 )
 
         if len(self.robotToTags) > 0:
@@ -278,6 +312,8 @@ def periodic(self) -> None:
         for camera in self.cameras:
             seeTag = False
             botPose = Pose3d()
+            tagPoses: list[Transform3d] = []
+
             for tagId, apriltag in constants.kApriltagPositionDict.items():
                 if camera.canSeeTarget(simPose3d, apriltag):
                     rngOffset = Transform3d(
@@ -292,6 +328,7 @@ def periodic(self) -> None:
                     botToTagPose = (
                         botToTagPose + rngOffset * botToTagPose.translation().norm()
                     )
+                    tagPoses.append(Transform3d(simPose3d + camera.location, apriltag))
                     self.robotToTags.append(
                         (
                             botToTagPose,
@@ -316,8 +353,30 @@ def periodic(self) -> None:
                 botPose + camera.location, camera.key, simPose3d, rel.camToTarg
             )
 
+            if len(tagPoses) == 0:
+                continue
+            totalDistance = 0
+            for transform in tagPoses:
+                totalDistance += transform.translation().norm()
+
+            avgDistance = totalDistance / len(tagPoses)
+
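+            # same distance-based std dev scaling as the real vision subsystem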
+            xyStdDev = (
+                constants.kXyStdDevCoeff * (avgDistance * avgDistance) / len(tagPoses)
+            )
+            thetaStdDev = (
+                constants.kThetaStdDevCoeff
+                * (avgDistance * avgDistance)
+                / len(tagPoses)
+            )
+
             self.poseList.append(
-                EstimatedPose(botPose, seeTag, Timer.getFPGATimestamp())
+                EstimatedPose(
+                    botPose,
+                    seeTag,
+                    Timer.getFPGATimestamp(),
+                    [xyStdDev, xyStdDev, thetaStdDev],
+                )
             )
 
         if len(self.robotToTags) > 0: