Any update on this? Everyone on our team has an M2-M4 MacBook Pro, and they no longer trust these machines because a simple matrix computation gives wrong results.
This is a simple test we ran:
from rich import print
import os

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "4"  # silence TensorFlow C++ logging

import numpy as np
import tensorflow as tf

# Test data from my dataset: a 4x4 homogeneous rigid-body transform
RT = tf.constant(
    [
        [-0.25497323, -0.81989247, -0.5126062, 0.3136883],
        [-0.32365915, 0.57191426, -0.75376326, 0.36354592],
        [0.9111716, -0.02627973, -0.41118845, 0.511739],
        [0.0, 0.0, 0.0, 1.0],
    ]
)
def invert(RT):
    """
    I found this bug while applying an inverse transform to a vector;
    the inverted transform was wrong.
    """
    R = RT[..., :3, :3]  # rotation block R
    T = RT[..., :3, 3]   # translation column t
    R_inv = tf.einsum("...ij->...ji", R)              # R^T (R is orthonormal)
    T_inv = -tf.einsum("...ij,...j->...i", R_inv, T)  # -R^T @ t, shape (..., 3)
    return T_inv
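# Sanity of the math: for a rigid transform RT = [[R, t], [0, 1]], the
# inverse is [[R^T, -R^T @ t], [0, 1]], so invert() should return exactly
# what np.linalg.inv(RT)[:3, 3] returns, up to floating-point precision.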
print("Numpy Sanity check")
print(f"np inv:\n{np.linalg.inv(RT)[:3, 3]}")
print(f"np inv Float32:\n{np.linalg.inv(tf.cast(RT, tf.float32))[:3, 3]}")
print(f"np inv Float16:\n{np.linalg.inv(tf.cast(RT, tf.float16))}")
with tf.device("/GPU:0"):
    res = invert(RT)
    print(f"\nTF on {res.device}")
    print(f"tf inv:\n{res}")
    print(f"tf inv Float32:\n{invert(tf.cast(RT, tf.float32))}")
    print(f"tf inv Float16:\n{invert(tf.cast(RT, tf.float16))}")
with tf.device("/CPU:0"):
    res = invert(RT)
    print(f"\nTF on {res.device}")
    print(f"tf inv:\n{res}")
    print(f"tf inv Float32:\n{invert(tf.cast(RT, tf.float32))}")
    print(f"tf inv Float16:\n{invert(tf.cast(RT, tf.float16))}")