-
Notifications
You must be signed in to change notification settings - Fork 135
Patching to enable building swift-apis on current nightly Swift #1184
base: main
Are you sure you want to change the base?
Changes from all commits
411ba0e
754b49e
fb81635
e8fb8a5
c0e7fb4
4a1d761
63bc5ca
652f815
335f94d
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -118,7 +118,7 @@ if(ENABLE_PYTHON_SUPPORT) | |
GIT_REPOSITORY | ||
git://github.com/pvieito/PythonKit | ||
GIT_TAG | ||
master | ||
6a05a15 | ||
CMAKE_ARGS | ||
-D BUILD_SHARED_LIBS=YES | ||
-D CMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE} | ||
|
@@ -189,7 +189,7 @@ if(NOT X10_FOUND AND NOT USE_BUNDLED_X10) | |
COMMAND | ||
rm -rf <SOURCE_DIR>/bazel-bin # ${CMAKE_COMMAND} -E rm -Rrf <SOURCE_DIR>/bazel-bin | ||
COMMAND | ||
bazel build ${VISIBILITY_FLAGS} -c opt --define framework_shared_object=false //tensorflow/compiler/tf2xla/xla_tensor:x10 --nocheck_visibility | ||
bazel build ${VISIBILITY_FLAGS} -c opt --define framework_shared_object=false //tensorflow:tensorflow //tensorflow/compiler/tf2xla/xla_tensor:x10 --nocheck_visibility | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This enables one to extract a standalone X10 after a successful CMake build, so subsequent builds can be made with the Swift Package Manager. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This doesn't seem right. You should be able to do that irrespective. This is just building an additional label, which means that there is potentially a dependency that is missing? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Without this, I think libtensorflow.so.xxx is not generated and I cannot copy them to a standalone build. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Technically, the tensorflow core itself is not needed for the build; it is only needed at runtime. |
||
COMMAND | ||
bazel shutdown | ||
INSTALL_COMMAND | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -40,12 +40,12 @@ public struct Tensor<Scalar: TensorFlowScalar> { | |
@usableFromInline | ||
internal var _isScalarZero = false | ||
|
||
/// An internal workaround for SR-13263: debug info generation crash. | ||
@usableFromInline | ||
class SR13263Workaround {} | ||
// /// An internal workaround for SR-13263: debug info generation crash. | ||
// @usableFromInline | ||
// class SR13263Workaround {} | ||
|
||
/// An internal workaround for SR-13263: debug info generation crash. | ||
internal var _sr13263Workaround: SR13263Workaround? | ||
// /// An internal workaround for SR-13263: debug info generation crash. | ||
// internal var _sr13263Workaround: SR13263Workaround? | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why comment this out? Is it no longer needed? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Leaving this as-is triggers a compiler crash, so I removed this workaround. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This was a workaround for an actual issue. I think that it would be better to verify whether the underlying issue has been resolved or not. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. The underlying issue has migrated, in that you can now trigger the same crash by inserting this placeholder, rather than avoiding the crash by having it. I've seen this with other types. Unclear when the behavior inverted. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm not seeing this happen, with or without it. Never mind. I reproduced the crash on recent nightly toolchains, using the reproducer from swiftlang/swift#55703. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I'm not seeing the crash affect S4TF, with or without the following: export TENSORFLOW_USE_RELEASE_TOOLCHAIN=1
cd s4tf
swift build -Xswiftc -DTENSORFLOW_USE_STANDARD_TOOLCHAIN \
-c release -Xswiftc -g -Xcc -I${DESTDIR}/usr/include -Xlinker -L${DESTDIR}/usr/lib
rm -f .build/release/libx10.dylib
cp ${DESTDIR}/usr/lib/libx10.dylib .build/release/libx10.dylib
swift test -Xswiftc -DTENSORFLOW_USE_STANDARD_TOOLCHAIN \
-c release -Xswiftc -g -Xcc -I${DESTDIR}/usr/include -Xlinker -L${DESTDIR}/usr/lib |
||
|
||
@inlinable | ||
public init(handle: TensorHandle<Scalar>) { | ||
|
@@ -132,7 +132,7 @@ extension Tensor { | |
/// Reshape to scalar. | ||
/// - Precondition: The tensor has exactly one scalar. | ||
@inlinable | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public func scalarized() -> Scalar { | ||
precondition( | ||
shape.contiguousSize == 1, | ||
|
@@ -174,7 +174,7 @@ extension Tensor { | |
return handle.makeHostCopy() | ||
} | ||
|
||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public var scalars: [Scalar] { | ||
if handle.backend == .XLA { | ||
let (storage, _) = xlaTensor.fetchTensorValues(Scalar.self) | ||
|
@@ -203,7 +203,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint { | |
|
||
extension Tensor { | ||
/// Creates a 0-D tensor from a scalar value. | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public init(_ value: Scalar, on device: Device = .default) { | ||
switch device.backend { | ||
case .XLA: | ||
|
@@ -227,7 +227,7 @@ extension Tensor where Scalar: TensorFlowFloatingPoint { | |
extension Tensor { | ||
/// Creates a 1D tensor from scalars. | ||
@inlinable | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public init(_ scalars: [Scalar], on device: Device = .default) { | ||
self.init(shape: [scalars.count], scalars: scalars, on: device) | ||
} | ||
|
@@ -247,7 +247,7 @@ extension Tensor { | |
/// - scalars: The scalar contents of the tensor. | ||
/// - Precondition: The product of the dimensions of the shape must equal the number of scalars. | ||
@inlinable | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public init(shape: TensorShape, scalars: [Scalar], on device: Device = .default) { | ||
precondition( | ||
shape.contiguousSize == scalars.count, | ||
|
@@ -628,7 +628,7 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric { | |
/// Adds two tensors and produces their sum. | ||
/// - Note: `+` supports broadcasting. | ||
@inlinable | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public static func + (lhs: Tensor, rhs: Tensor) -> Tensor { | ||
if lhs._isScalarZero { | ||
return rhs | ||
|
@@ -641,7 +641,7 @@ extension Tensor: AdditiveArithmetic where Scalar: Numeric { | |
/// Subtracts one tensor from another and produces their difference. | ||
/// - Note: `-` supports broadcasting. | ||
@inlinable | ||
@differentiable(where Scalar: TensorFlowFloatingPoint) | ||
@differentiable(reverse where Scalar: TensorFlowFloatingPoint) | ||
public static func - (lhs: Tensor, rhs: Tensor) -> Tensor { | ||
if rhs._isScalarZero { | ||
return lhs | ||
|
@@ -745,7 +745,7 @@ public protocol TensorProtocol { | |
public protocol DifferentiableTensorProtocol: | ||
TensorProtocol & Differentiable & EuclideanDifferentiable | ||
where Scalar: TensorFlowFloatingPoint { | ||
@differentiable(wrt: self) | ||
@differentiable(reverse, wrt: self) | ||
func annotate(_ annotation: String) -> Self | ||
} | ||
|
||
|
@@ -773,7 +773,7 @@ where Scalar: TensorFlowFloatingPoint { | |
/// | ||
/// - Parameter annotation: The annotation to be added. | ||
/// - Returns: The annotated tensor. | ||
@differentiable(wrt: self) | ||
@differentiable(reverse, wrt: self) | ||
public func annotate(_ annotation: String) -> Tensor<Scalar> { | ||
switch handle.backend { | ||
case .XLA: | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
6a05a15 is the last PythonKit commit before its CMake support was removed.