How to enable the earpiece instead of the speaker in Flutter

Problem description

I have already implemented peer-to-peer calling in Flutter, but there is one problem: how can I route the audio to the earpiece instead of the speaker? When a call is established from one device to another, only the loudspeaker is enabled, never the earpiece. Please help me with this. Thanks in advance.

Future<void> joinRoom(String roomId, RTCVideoRenderer remoteVideo) async {
  FirebaseFirestore db = FirebaseFirestore.instance;
  DocumentReference roomRef = db.collection('rooms').doc(roomId);
  var roomSnapshot = await roomRef.get();
  log('Got room ${roomSnapshot.exists}');

  if (roomSnapshot.exists) {
    log('Create PeerConnection with configuration: $configuration');
    peerConnection = await createPeerConnection(configuration);

    registerPeerConnectionListeners();

    localStream?.getTracks().forEach((track) {
      peerConnection?.addTrack(track, localStream!);
    });

    // Code for collecting ICE candidates below
    var calleeCandidatesCollection = roomRef.collection('calleeCandidates');
    peerConnection?.onIceCandidate = (RTCIceCandidate candidate) {
      if (candidate == null) {
        log('onIceCandidate: complete!');
        return;
      }
      log('onIceCandidate: ${candidate.toMap()}');
      calleeCandidatesCollection.add(candidate.toMap());
    };
    // Code for collecting ICE candidate above

    peerConnection?.onTrack = (RTCTrackEvent event) {
      log('Got remote track: ${event.streams[0]}');
      event.streams[0].getTracks().forEach((track) {
        log('Add a track to the remoteStream: $track');
        remoteStream?.addTrack(track);
      });
    };

    // Code for creating SDP answer below
    var data = roomSnapshot.data();
    log('Got offer $data');
    var offer = data['offer'];
    await peerConnection?.setRemoteDescription(
      RTCSessionDescription(offer['sdp'], offer['type']),
    );
    var answer = await peerConnection!.createAnswer();
    log('Created Answer $answer');

    await peerConnection!.setLocalDescription(answer);

    Map<String, dynamic> roomWithAnswer = {
      'answer': {'type': answer.type, 'sdp': answer.sdp}
    };

    await roomRef.update(roomWithAnswer);
    // Finished creating SDP answer

    // Listening for remote ICE candidates below
    roomRef.collection('callerCandidates').snapshots().listen((snapshot) {
      snapshot.docChanges.forEach((document) {
        var data = document.doc.data();
        // log(data);
        log('Got new remote ICE candidate: $data');
        peerConnection!.addCandidate(
          RTCIceCandidate(
            data['candidate'],
            data['sdpMid'],
            data['sdpMLineIndex'],
          ),
        );
      });
    });
  }
}

Future<void> openUserMedia(RTCVideoRenderer localVideo,
    RTCVideoRenderer remoteVideo, BuildContext contextt) async {
  context = contextt;
  chatProvider = Provider.of<ChatProvider>(context, listen: false);

  // Capture audio only; no video track is needed for a voice call.
  var stream = await navigator.mediaDevices.getUserMedia({'audio': true});

  localVideo.srcObject = stream;
  localStream = stream;

  // Start the remote renderer with an empty local stream until remote tracks arrive.
  remoteVideo.srcObject = await createLocalMediaStream('key');
}

Tags: flutter, dart, audio, webrtc, speaker

Solution
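
Nothing in the posted code chooses an audio output route, which is why only the loudspeaker is heard once the call connects. Below is a minimal sketch of switching the route to the earpiece, assuming a flutter_webrtc version that ships the Helper class; the function name routeCallAudio is made up for illustration.

import 'package:flutter_webrtc/flutter_webrtc.dart';

// Sketch: route call audio to the earpiece (or back to the loudspeaker).
// Call it after the local stream exists, e.g. right after openUserMedia,
// or once the call has connected.
Future<void> routeCallAudio({required bool useEarpiece}) async {
  // With the speakerphone turned off, the platform falls back to the
  // earpiece (or to a wired/Bluetooth headset if one is attached).
  await Helper.setSpeakerphoneOn(!useEarpiece);

  // Older plugin versions expose a similar switch on the audio track itself,
  // e.g. localStream.getAudioTracks().first.enableSpeakerphone(false);
}

Calling routeCallAudio(useEarpiece: true) after openUserMedia (or after joinRoom completes) should move the audio off the loudspeaker; routeCallAudio(useEarpiece: false) switches back to the speaker.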

