Hello,
I am looking for a way to anchor a web view component to the user, so that it follows their line of vision as they move.
I tried using a RealityView with an AnchorEntity, but it raises the error "Presentations are not permitted within volumetric window scene". Can I anchor the window instead?
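One approach that might work, assuming the content can move out of the volumetric window and into an ImmersiveSpace, is to present the web view as a RealityView attachment parented to a head anchor. The sketch below is only an illustration: FollowingWebPanel is a hypothetical container, and WebView stands in for whatever representable wraps the web content.
import SwiftUI
import RealityKit

// Hypothetical sketch: a head-anchored panel inside an ImmersiveSpace.
struct FollowingWebPanel: View {
    var body: some View {
        RealityView { content, attachments in
            // A head anchor keeps its children positioned relative to the user's head.
            let headAnchor = AnchorEntity(.head)
            if let panel = attachments.entity(for: "webPanel") {
                panel.position = [0, 0, -1]   // one metre in front of the viewer
                headAnchor.addChild(panel)
            }
            content.add(headAnchor)
        } attachments: {
            Attachment(id: "webPanel") {
                // Placeholder: any SwiftUI view wrapping the web content.
                WebView(urlString: "https://somelink.com/")
                    .frame(width: 500, height: 800)
            }
        }
    }
}
An AnchorEntity(.head) keeps its children positioned relative to the user's head, so the panel follows their line of vision. Anchoring the window itself is a different matter, since window placement on visionOS is controlled by the user and the system.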
Hello.
I'm trying to stop my web view from asking for microphone permission every time it runs, since I already handle that permission in the application itself. This is the code I use, but the permission handler never seems to run at all (I tried printing and debugging, and it is never entered). I am using visionOS, if that makes a difference. Are there any alternative solutions to this?
import SwiftUI
import WebKit
import AVFAudio

struct WebView: UIViewRepresentable {
    let urlString: String

    func makeUIView(context: Context) -> WKWebView {
        let webView = WKWebView()
        webView.navigationDelegate = context.coordinator
        if let url = URL(string: urlString) {
            let request = URLRequest(url: url)
            webView.load(request)
        }
        return webView
    }

    func updateUIView(_ uiView: WKWebView, context: Context) {}

    func makeCoordinator() -> Coordinator {
        Coordinator()
    }

    class Coordinator: NSObject, WKUIDelegate, WKNavigationDelegate {
        // Handle media capture permissions
        func webView(_ webView: WKWebView,
                     decideMediaCapturePermissionsFor origin: WKSecurityOrigin,
                     initiatedBy frame: WKFrameInfo,
                     type: WKMediaCaptureType) async -> WKPermissionDecision {
            return .grant
        }

        // Handle URL authentication challenges
        func webView(_ webView: WKWebView, didReceive challenge: URLAuthenticationChallenge, completionHandler: @escaping (URLSession.AuthChallengeDisposition, URLCredential?) -> Void) {
            let username = "user"
            let password = "password"
            let credential = URLCredential(user: username, password: password, persistence: .forSession)
            completionHandler(.useCredential, credential)
        }
    }
}

struct AssistantView: View {
    @Environment(\.dismissWindow) private var dismissWindow

    var body: some View {
        WebView(urlString: "https://somelink.com/")
            .edgesIgnoringSafeArea(.all)
            .frame(width: 500, height: 800)
    }
}
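One likely reason the permission handler never runs: webView(_:decideMediaCapturePermissionsFor:initiatedBy:type:) is a WKUIDelegate method, and the code above only assigns the coordinator as navigationDelegate, so WebKit never consults it. A minimal sketch, assuming that is the only issue (PermissiveWebView is a hypothetical name; the authentication handler is omitted for brevity):
import SwiftUI
import WebKit

// Hypothetical sketch: the coordinator is also registered as uiDelegate,
// which is the delegate WebKit asks for media-capture permission.
struct PermissiveWebView: UIViewRepresentable {
    let urlString: String

    func makeUIView(context: Context) -> WKWebView {
        let webView = WKWebView()
        webView.navigationDelegate = context.coordinator
        webView.uiDelegate = context.coordinator   // without this line the method below is never called
        if let url = URL(string: urlString) {
            webView.load(URLRequest(url: url))
        }
        return webView
    }

    func updateUIView(_ uiView: WKWebView, context: Context) {}

    func makeCoordinator() -> Coordinator { Coordinator() }

    class Coordinator: NSObject, WKUIDelegate, WKNavigationDelegate {
        // Grant camera/microphone capture requests from page content.
        func webView(_ webView: WKWebView,
                     decideMediaCapturePermissionsFor origin: WKSecurityOrigin,
                     initiatedBy frame: WKFrameInfo,
                     type: WKMediaCaptureType) async -> WKPermissionDecision {
            .grant
        }
    }
}
The same change could be tried in the original WebView by adding the single webView.uiDelegate = context.coordinator line in makeUIView.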
Hello, I have created a view that displays a 360° image as a full dome, and I need to perform a task when the user taps anywhere on the screen (to leave the dome), but no matter what I try, it just does not work; nothing is printed at all.
import SwiftUI
import RealityKit
import RealityKitContent

struct StreetWalk: View {
    @Binding var threeSixtyImage: String
    @Binding var isExitFaded: Bool

    var body: some View {
        RealityView { content in
            // Create a material with a 360 image
            guard let url = Bundle.main.url(forResource: threeSixtyImage, withExtension: "jpeg"),
                  let resource = try? await TextureResource(contentsOf: url) else {
                // If the asset isn't available, something is wrong with the app.
                fatalError("Unable to load starfield texture.")
            }
            var material = UnlitMaterial()
            material.color = .init(texture: .init(resource))

            // Attach the material to a large sphere.
            let streetDome = Entity()
            streetDome.name = "streetDome"
            streetDome.components.set(ModelComponent(
                mesh: .generatePlane(width: 1000, depth: 1000),
                materials: [material]
            ))

            // Ensure the texture image points inward at the viewer.
            streetDome.scale *= .init(x: -1, y: 1, z: 1)

            content.add(streetDome)
        } update: { updatedContent in
            // Create a material with a 360 image
            guard let url = Bundle.main.url(forResource: threeSixtyImage,
                                            withExtension: "jpeg"),
                  let resource = try? TextureResource.load(contentsOf: url) else {
                // If the asset isn't available, something is wrong with the app.
                fatalError("Unable to load starfield texture.")
            }
            var material = UnlitMaterial()
            material.color = .init(texture: .init(resource))

            updatedContent.entities.first?.components.set(ModelComponent(
                mesh: .generateSphere(radius: 1000),
                materials: [material]
            ))
        }
        .gesture(tap)
    }

    var tap: some Gesture {
        SpatialTapGesture().targetedToAnyEntity().onChanged { value in
            // Access the tapped entity here.
            print(value.entity)
            print("maybe you can tap the dome")
            // isExitFaded.toggle()
        }
    }
}
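A likely cause of the silent tap handler: targeted spatial gestures only resolve entities that have both a CollisionComponent and an InputTargetComponent, and the dome entity above is created with neither. A minimal sketch under that assumption (makeDomeTappable is a hypothetical helper; the collision radius simply mirrors the dome size):
import RealityKit

// Hypothetical helper: gives the dome the two components that targeted
// spatial gestures require before they can hit an entity.
func makeDomeTappable(_ dome: Entity, radius: Float = 1000) {
    // Hit-testing for targeted gestures goes through the collision shape.
    dome.components.set(CollisionComponent(shapes: [.generateSphere(radius: radius)]))
    // Marks the entity as a valid target for spatial input such as taps.
    dome.components.set(InputTargetComponent())
}
Calling makeDomeTappable(streetDome) in the make closure before content.add(streetDome) should then let SpatialTapGesture().targetedToAnyEntity() hit the dome. It may also be worth handling the tap in .onEnded rather than .onChanged, since a tap gesture reports its value when it completes.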