AR filters #1746
Replies: 5 comments
-
Hello, can you share the code you used to implement it?
-
Hi, here is my code. XImageProcessorSource is an implementation of a snap.camerakit image-processor source; it already contains a surface, but I can't figure out how to attach it to RTMP. I would be very grateful for any help; if need be, I can open access to the repository.
@SuppressLint("RememberReturnType")
@Composable
fun SnapCameraView(
service: NewRTMPService,
apiToken: String = "***",
groupIds: String = "***",
) {
val context = LocalContext.current
val lifecycleOwner = LocalLifecycleOwner.current
val processorSource = remember {
XImageProcessorSource(context, lifecycleOwner)
}
// processorSource.takePhoto { }
val cameraKitSession = remember { MutableStateFlow<Session?>(null) }
LaunchedEffect(Unit) {
processorSource.setService(service)
processorSource.startPreview(true)
// delay(5000)
// service.genericStream.startPreview(processorSource.surface!!,1920,1080)
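// NOTE: processorSource.surface is the camera's input surface (it feeds CameraKit),
// so handing it to genericStream.startPreview would at best show raw, pre-lens frames
// while contending with CameraX for the same surface; the encoder needs a processed output.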
}
AndroidView(
factory = { ctx ->
LayoutInflater.from(ctx).inflate(R.layout.camera_layout, null).apply {
val viewStub = findViewById<ViewStub>(R.id.camera_kit_stub)
cameraKitSession.value = Session(context = ctx) {
apiToken(apiToken)
imageProcessorSource(processorSource)
attachTo(viewStub)
}.apply {
lenses.repository.observe(
LensesComponent.Repository.QueryCriteria.Available(groupIds)
) { result ->
result.whenHasSome { myLenses ->
// Guard against lens groups with fewer than 11 entries.
val lens = myLenses.getOrNull(10) ?: myLenses.first()
Timber.d("Applying lens, ${myLenses.size} lenses available")
lenses.processor.apply(lens)
}
}
}
}
// CameraLayout(ctx).apply {
// configureSession {
// apiToken(apiToken)
// }
// configureLensesCarousel {
// observedGroupIds = setOf(groupIds)
// }
//
// }
}
)
}

import ai.deepar.ar.CameraResolutionPreset
import ai.deepar.ar.DeepARImageFormat
import android.Manifest
import android.annotation.SuppressLint
import android.content.Context
import android.content.pm.PackageManager
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.ImageFormat
import android.graphics.PixelFormat
import android.graphics.Rect
import android.graphics.SurfaceTexture
import android.hardware.camera2.CameraCharacteristics
import android.hardware.camera2.CameraManager
import android.media.Image
import android.media.ImageReader
import android.media.MediaCodec
import android.media.MediaFormat
import android.os.Environment
import android.os.Handler
import android.os.Looper
import android.util.Log
import android.util.Size
import android.util.SizeF
import android.view.Surface
import android.view.WindowManager
import androidx.annotation.CheckResult
import androidx.annotation.MainThread
import androidx.annotation.OptIn
import androidx.camera.camera2.interop.Camera2CameraInfo
import androidx.camera.camera2.interop.ExperimentalCamera2Interop
import androidx.camera.core.AspectRatio
import androidx.camera.core.Camera
import androidx.camera.core.CameraInfo
import androidx.camera.core.CameraSelector
import androidx.camera.core.CameraSelector.LensFacing
import androidx.camera.core.DisplayOrientedMeteringPointFactory
import androidx.camera.core.ExperimentalGetImage
import androidx.camera.core.FocusMeteringAction
import androidx.camera.core.ImageAnalysis
import androidx.camera.core.ImageCapture
import androidx.camera.core.ImageProxy
import androidx.camera.core.Preview
import androidx.camera.core.SurfaceOrientedMeteringPointFactory
import androidx.camera.core.UseCaseGroup
import androidx.camera.core.ViewPort
import androidx.camera.core.ZoomState
import androidx.camera.core.impl.LensFacingCameraFilter
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.core.content.ContextCompat
import androidx.lifecycle.Lifecycle
import androidx.lifecycle.LifecycleOwner
import com.of.live.DeepArActivity
import com.of.live.DeepArActivity.Companion.NUMBER_OF_BUFFERS
import com.of.live.screens.camera.ar.BufferVideoSource
import com.of.live.screens.camera.ar.RGABSource
import com.of.live.service.CameraXSource
import com.of.live.service.CameraXSourceDeepAR
import com.of.live.service.CameraXSourceDeepAR.Companion
import com.of.live.service.NewRTMPService
import com.pedro.encoder.utils.yuv.YUVUtil
import com.pedro.encoder.utils.yuv.YUVUtil.ARGBtoYUV420SemiPlanar
import com.snap.camerakit.ImageProcessor
import com.snap.camerakit.Source
import com.snap.camerakit.UserProcessor
import com.snap.camerakit.common.Consumer
import com.snap.camerakit.connectOutput
import com.snap.camerakit.invoke
import com.snap.camerakit.processBitmap
import com.snap.camerakit.support.camera.AllowsCameraFlash
import com.snap.camerakit.support.camera.AllowsCameraFocus
import com.snap.camerakit.support.camera.AllowsCameraPreview
import com.snap.camerakit.support.camera.AllowsCameraZoom
import com.snap.camerakit.support.camera.AllowsPhotoCapture
import com.snap.camerakit.support.camera.AllowsSnapshotCapture
import com.snap.camerakit.support.camera.AllowsVideoCapture
import com.snap.camerakit.support.camera.Crop
import com.snap.camerakit.support.camera.captureSize
import com.snap.camerakit.support.camera.rotatedTextureSize
import com.snap.camerakit.support.camera.waitFor
import com.snap.camerakit.toBitmap
import timber.log.Timber
import java.io.Closeable
import java.io.File
import java.io.InputStream
import java.io.OutputStream
import java.io.PipedInputStream
import java.io.PipedOutputStream
import java.lang.Math.toDegrees
import java.lang.Math.toRadians
import java.lang.ref.WeakReference
import java.nio.ByteBuffer
import java.nio.ByteOrder
import java.util.UUID
import java.util.concurrent.CountDownLatch
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
import java.util.concurrent.Future
import java.util.concurrent.RejectedExecutionException
import java.util.concurrent.TimeUnit
import java.util.concurrent.atomic.AtomicReference
import kotlin.math.atan
import kotlin.math.atan2
import kotlin.math.max
import kotlin.math.min
import kotlin.math.tan
private const val TAG = "XImageProcessorSource"
private const val DEFAULT_PREVIEW_STOP_TIMEOUT_SECONDS = 5L
private val EMPTY_CLOSEABLE = Closeable { }
private val CENTER_FOCUS_POINT: FocusMeteringAction = FocusMeteringAction.Builder(
SurfaceOrientedMeteringPointFactory(1f, 1f).createPoint(0.5f, 0.5f)
).disableAutoCancel().build()
private typealias InputWithOptions = Pair<ImageProcessor.Input, Set<ImageProcessor.Input.Option>>
/**
* A simple implementation of [Source] for [ImageProcessor] which allows to start camera preview streaming frames
* that are delivered to [com.snap.camerakit.Session.processor]. It demonstrates how to take a snapshot
* image (photo) using [ImageProcessor.toBitmap] helper method that utilizes [ImageReader] under the hood. Also, since
* CameraKit has a built-in support to render processed output to a video file, this class leverages it in [takeVideo].
* If the CameraKit built-in video recording support is not suitable, one can implement it using a similar approach to
* photo snapshot by connecting [android.media.MediaRecorder] surface as another output to record continuous frames
* into a video file.
*/
class XImageProcessorSource @JvmOverloads constructor(
context: Context,
lifecycleOwner: LifecycleOwner,
private val executorService: ExecutorService = Executors.newSingleThreadExecutor(),
private val videoOutputDirectory: File =
context.getExternalFilesDir(Environment.DIRECTORY_MOVIES) ?: context.filesDir,
private val mainLooperProvider: () -> Looper = { Looper.getMainLooper() }
) : Source<ImageProcessor>,
AllowsCameraPreview,
AllowsSnapshotCapture,
AllowsPhotoCapture,
AllowsVideoCapture,
AllowsCameraFocus,
AllowsCameraZoom,
AllowsCameraFlash {
private val applicationContext: Context = context.applicationContext
private val lifecycleOwnerWeakRef = WeakReference(lifecycleOwner)
private val mainExecutor = ContextCompat.getMainExecutor(applicationContext)
private val lastImageProcessor = AtomicReference<ImageProcessor>()
private val imageProcessorInputConnection = AtomicReference<Closeable>()
private val connectedImageProcessorInput = AtomicReference<InputWithOptions>()
private val waitingForImageProcessorTask = AtomicReference<Future<*>>()
private val userOperationTask = AtomicReference<Future<*>>()
private var cameraProvider: ProcessCameraProvider? = null
private var preview: Preview? = null
private var previewOutput: PreviewOutput? = null
private var imageCapture: ImageCapture? = null
private var camera: Camera? = null
private var zoomState: ZoomState? = null
private var activePreviewRequest: PreviewRequest? = null
var surface: Surface? = null
private var buffers: Array<ByteBuffer?>? = null
private var currentBuffer = 0
private var allocatedBufferSize = 0
private var flashConfiguration: AllowsCameraFlash.FlashConfiguration =
AllowsCameraFlash.FlashConfiguration.Disabled
private var previousFocusAction: FocusMeteringAction = CENTER_FOCUS_POINT
private var rtmpService: NewRTMPService? = null
override fun attach(processor: ImageProcessor): Closeable {
lastImageProcessor.set(processor)
return Closeable {
if (lastImageProcessor.compareAndSet(processor, null)) {
val mainLooper = mainLooperProvider()
if (mainLooper.thread === Thread.currentThread()) {
stopPreview()
} else {
val latch = CountDownLatch(1)
Handler(mainLooper).postAtFrontOfQueue {
stopPreview()
latch.countDown()
}
if (!latch.await(DEFAULT_PREVIEW_STOP_TIMEOUT_SECONDS, TimeUnit.SECONDS)) {
Log.w(TAG, "Timed out while waiting to stop camera preview")
}
}
} else {
throw IllegalStateException("Unexpected ImageProcessor set before it was cleared")
}
}
}
fun setService(newRTMPService: NewRTMPService) {
rtmpService = newRTMPService
}
@SuppressLint("UnsafeOptInUsageError")
@MainThread
override fun startPreview(
configuration: AllowsCameraPreview.Configuration,
inputOptions: Set<ImageProcessor.Input.Option>,
callback: (succeeded: Boolean) -> Unit
) {
val previewRequest =
PreviewRequest(configuration, inputOptions)
if (activePreviewRequest != previewRequest) {
stopPreview()
activePreviewRequest = previewRequest
} else {
// Preview is already running or initializing.
callback(true)
return
}
val cameraProviderFuture = ProcessCameraProvider.getInstance(applicationContext)
cameraProviderFuture.addListener(
{
lifecycleOwnerWeakRef.get()?.let { lifecycleOwner ->
if (lifecycleOwner.lifecycle.currentState != Lifecycle.State.DESTROYED &&
activePreviewRequest == previewRequest
) {
cameraProviderFuture.get().let { cameraProvider ->
this.cameraProvider = cameraProvider
val lensFacing = if (configuration.facingFront) {
CameraSelector.LENS_FACING_FRONT
} else {
CameraSelector.LENS_FACING_BACK
}
val cameraSelector = CameraSelector.Builder()
.apply {
try {
// Some devices (Android TV) might have only a single (usb) camera attached
// resulting in requireLensFacing failing due to no camera with a specified
// lens facing found - the lens facing for such cameras is typically null
// and we would like to be able to open them nevertheless.
addCameraFilter(
LenientLensFacingCameraFilter(
lensFacing
)
)
} catch (e: ClassNotFoundException) {
Timber.w(
e, "Failed to create LenientLensFacingCameraFilter, " +
"falling back to requireLensFacing($lensFacing)"
)
requireLensFacing(lensFacing)
}
}
.build()
@AspectRatio.Ratio val aspectRatio = when (configuration) {
is AllowsCameraPreview.Configuration.Default -> {
configurationRatioToCameraXRatio(
configuration.aspectRatio
)
}
else -> {
AspectRatio.RATIO_16_9
}
}
val preview = Preview.Builder()
.setTargetAspectRatio(aspectRatio)
.setTargetRotation(applicationContext.displayRotation)
.build()
.apply {
setSurfaceProvider(
createSurfaceProviderFor(cameraSelector, previewRequest)
)
}
//
this.preview = preview
val imageCapture = ImageCapture.Builder()
.setCaptureMode(ImageCapture.CAPTURE_MODE_MINIMIZE_LATENCY)
.setTargetAspectRatio(aspectRatio)
.build()
this.imageCapture = imageCapture
val crop = when (configuration) {
is AllowsCameraPreview.Configuration.Default -> configuration.crop
else -> Crop.None
}
val viewPort = when (crop) {
is Crop.Center -> {
ViewPort.Builder(
crop.aspectRatio,
preview.targetRotation
).build()
}
else -> null
}
val cameraResolutionPreset = CameraResolutionPreset.P1920x1080
val imageAnalysis = ImageAnalysis.Builder()
.setTargetResolution(
Size(
cameraResolutionPreset.width,
cameraResolutionPreset.height
)
)
.setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
.setOutputImageFormat(ImageAnalysis.OUTPUT_IMAGE_FORMAT_YUV_420_888)
.build()
val imageAnalyzer = ImageAnalysis.Analyzer { image ->
Timber.i("test:${image.format}")
processImage(image)
image.close()
}
imageAnalysis.setAnalyzer(
ContextCompat.getMainExecutor(applicationContext),
imageAnalyzer
)
val useCaseGroup = UseCaseGroup.Builder()
.addUseCase(preview)
.addUseCase(imageCapture)
.addUseCase(imageAnalysis)
.apply {
if (viewPort != null) {
setViewPort(viewPort)
}
}
.build()
camera = cameraProvider
.bindToLifecycle(lifecycleOwner, cameraSelector, useCaseGroup)
.apply {
cameraInfo.zoomState.observe(lifecycleOwner) {
// NOTE: ZoomState is not synchronized with Camera2 frame results,
// for more information see:
// https://partnerissuetracker.corp.google.com/issues/169938468
zoomState = it
}
}
callback(true)
}
} else {
callback(false)
}
} ?: callback(false)
},
mainExecutor
)
}
private fun processImage(image: ImageProxy) {
val yuvBuffer = imageToNV21(image) // convert to an NV21 byte array
(rtmpService?.genericStream?.videoSource as? BufferVideoSource)?.setBufferNV21(
yuvBuffer
)
}
// Convert ImageProxy to NV21 (assumes tightly packed planes; see the stride-aware sketch below)
private fun imageToNV21(image: ImageProxy): ByteArray {
val yBuffer = image.planes[0].buffer
val uBuffer = image.planes[1].buffer
val vBuffer = image.planes[2].buffer
val ySize = yBuffer.remaining()
val uSize = uBuffer.remaining()
val vSize = vBuffer.remaining()
val nv21 = ByteArray(ySize + uSize + vSize)
// Copy Y
yBuffer.get(nv21, 0, ySize)
// Copy UV, swapping order: NV21 stores V first, then U
val uvOffset = ySize
vBuffer.get(nv21, uvOffset, vSize)
uBuffer.get(nv21, uvOffset + vSize, uSize)
return nv21
}
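// The copy above only yields valid NV21 when every plane is tightly packed
// (rowStride == width, pixelStride == 1) and the chroma planes alias one
// interleaved buffer, which is device-dependent. A stride-aware sketch that
// always produces NV21 (assumes even width/height, as with YUV_420_888):
private fun imageToNV21Strided(image: ImageProxy): ByteArray {
    val width = image.width
    val height = image.height
    val nv21 = ByteArray(width * height * 3 / 2)
    // Copy Y row by row, skipping any row padding.
    val yPlane = image.planes[0]
    var out = 0
    for (row in 0 until height) {
        yPlane.buffer.position(row * yPlane.rowStride)
        yPlane.buffer.get(nv21, out, width)
        out += width
    }
    // Interleave V and U one sample at a time, honoring rowStride and pixelStride.
    val uPlane = image.planes[1]
    val vPlane = image.planes[2]
    for (row in 0 until height / 2) {
        for (col in 0 until width / 2) {
            nv21[out++] = vPlane.buffer.get(row * vPlane.rowStride + col * vPlane.pixelStride)
            nv21[out++] = uPlane.buffer.get(row * uPlane.rowStride + col * uPlane.pixelStride)
        }
    }
    return nv21
}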
@OptIn(ExperimentalGetImage::class)
private fun superTest(image: ImageProxy) {
if (image.format == PixelFormat.RGBA_8888) {
val planes = image.image!!.planes
Timber.i("test planes:${planes.size}")
if (planes.isEmpty()) {
Timber.e("Failed to get planes from the image")
return
}
val plane0 = planes[0]
val byteBuffer: ByteBuffer = plane0.buffer
val byteArray = ByteArray(byteBuffer.capacity())
byteBuffer.get(byteArray)
// val intBuffer = byteBuffer.asIntBuffer()
// val intArray = IntArray(intBuffer.remaining())
// intBuffer.get(intArray)
(rtmpService?.genericStream?.videoSource as? BufferVideoSource)?.setBufferNV21(
byteArray
)
}
}
@OptIn(ExperimentalGetImage::class)
private fun superTest2(image: ImageProxy) {
if (image.format == ImageFormat.YUV_420_888) {
val yBuffer = image.planes[0].buffer
val uBuffer = image.planes[1].buffer
val vBuffer = image.planes[2].buffer
val ySize = yBuffer.remaining()
val uSize = uBuffer.remaining()
val vSize = vBuffer.remaining()
val imageBufferSize = ySize + uSize + vSize
if (allocatedBufferSize < imageBufferSize) {
initializeBuffers(imageBufferSize)
}
val byteData = ByteArray(imageBufferSize)
val width = image.width
val yStride = image.planes[0].rowStride
val uStride = image.planes[1].rowStride
val vStride = image.planes[2].rowStride
var outputOffset = 0
if (width == yStride) {
yBuffer.get(byteData, outputOffset, ySize)
outputOffset += ySize
} else {
var inputOffset = 0
while (inputOffset < ySize) {
yBuffer.position(inputOffset)
yBuffer.get(byteData, outputOffset, min(yBuffer.remaining(), width))
outputOffset += width
inputOffset += yStride
}
}
// U and V are swapped: NV21 expects V before U.
if (width == vStride) {
vBuffer.get(byteData, outputOffset, vSize)
outputOffset += vSize
} else {
var inputOffset = 0
while (inputOffset < vSize) {
vBuffer.position(inputOffset)
vBuffer.get(byteData, outputOffset, min(vBuffer.remaining(), width))
outputOffset += width
inputOffset += vStride
}
}
if (width == uStride) {
uBuffer.get(byteData, outputOffset, uSize)
outputOffset += uSize
} else {
var inputOffset = 0
while (inputOffset < uSize) {
uBuffer.position(inputOffset)
uBuffer.get(byteData, outputOffset, min(uBuffer.remaining(), width))
outputOffset += width
inputOffset += uStride
}
}
val buffer = buffers!![currentBuffer]!!
buffer.clear()
buffer.put(byteData)
buffer.position(0)
(rtmpService?.genericStream?.videoSource as? BufferVideoSource)?.setBufferNV21(buffer)
currentBuffer = (currentBuffer + 1) % NUMBER_OF_BUFFERS
}
}
private fun initializeBuffers(size: Int) {
if (buffers == null) {
buffers = arrayOfNulls(NUMBER_OF_BUFFERS)
}
for (i in 0 until NUMBER_OF_BUFFERS) {
buffers!![i] = ByteBuffer.allocateDirect(size)
buffers!![i]?.order(ByteOrder.nativeOrder())
buffers!![i]?.position(0)
}
allocatedBufferSize = size
}
@MainThread
override fun stopPreview() {
waitingForImageProcessorTask.getAndSet(null)?.cancel(true)
imageProcessorInputConnection.getAndSet(null)?.close()
connectedImageProcessorInput.set(null)
val processCameraProvider = cameraProvider
val useCasesToUnbind = listOfNotNull(imageCapture, preview)
activePreviewRequest = null
cameraProvider = null
preview = null
previewOutput = null
imageCapture = null
camera = null
zoomState = null
processCameraProvider?.unbind(*useCasesToUnbind.toTypedArray())
}
@MainThread
override fun takeVideo(onAvailable: (File) -> Unit): Closeable {
return takeVideo(Consumer(onAvailable))
}
@MainThread
fun takeVideo(onAvailable: Consumer<File>): Closeable {
return previewOutput?.run {
val rotationRelativeToDisplay =
getRotationRelativeToDisplay(rotationDegrees, facingFront)
val size = processedTextureSize.captureSize(
rotationDegrees = rotationRelativeToDisplay,
useDisplayRatio = true,
context = applicationContext
)
var resultFile: File? = null
var connection = EMPTY_CLOSEABLE
var flash = EMPTY_CLOSEABLE
if (flashConfiguration is AllowsCameraFlash.FlashConfiguration.Enabled && !facingFront) {
flash = enableCameraFlash()
}
Timber.i("mmmm Start")
userOperationTask.getAndSet(
executorService.submit {
Timber.i("mmmm Start2")
lastImageProcessor.waitFor { processor ->
Timber.i("mmmm Start3")
val outputFile = File(videoOutputDirectory, "${UUID.randomUUID()}.mp4")
val hasAudioRecordingPermission = ContextCompat.checkSelfPermission(
applicationContext, Manifest.permission.RECORD_AUDIO
) == PackageManager.PERMISSION_GRANTED
Timber.i("mmmm Start4")
connection = processor.connectOutput(
outputFile,
size.width,
size.height,
captureAudio = hasAudioRecordingPermission
)
resultFile = outputFile
}
}
)?.cancel(true)
Timber.i("mmmm Start5")
Closeable {
userOperationTask.getAndSet(
executorService.submit {
connection.close()
resultFile?.let(onAvailable::accept)
}
)?.cancel(true)
flash.close()
}
} ?: EMPTY_CLOSEABLE
}
@MainThread
fun takeVideo2(onAvailable: Consumer<File>): Closeable {
return previewOutput?.run {
val rotationRelativeToDisplay =
getRotationRelativeToDisplay(rotationDegrees, facingFront)
val size = processedTextureSize.captureSize(
rotationDegrees = rotationRelativeToDisplay,
useDisplayRatio = true,
context = applicationContext
)
var connection = EMPTY_CLOSEABLE
var flash = EMPTY_CLOSEABLE
if (flashConfiguration is AllowsCameraFlash.FlashConfiguration.Enabled && !facingFront) {
flash = enableCameraFlash()
}
Timber.i("Start recording...")
// val tempFile = File.createTempFile("temp_video", ".mp4", applicationContext.cacheDir)
val tempFile = File(videoOutputDirectory, "${UUID.randomUUID()}.mp4")
userOperationTask.getAndSet(
executorService.submit {
lastImageProcessor.waitFor { processor ->
val hasAudioRecordingPermission = ContextCompat.checkSelfPermission(
applicationContext, Manifest.permission.RECORD_AUDIO
) == PackageManager.PERMISSION_GRANTED
Timber.i("Connecting output to temp file: ${tempFile.absolutePath}")
connection = processor.connectOutput(
tempFile,
size.width,
size.height,
captureAudio = hasAudioRecordingPermission
)
// Hand the file to the callback so streaming can start
onAvailable.accept(tempFile)
}
}
)?.cancel(true)
Closeable {
userOperationTask.getAndSet(
executorService.submit {
connection.close()
// Optional: delete the file once finished
if (tempFile.exists()) {
// tempFile.delete()
}
}
)?.cancel(true)
flash.close()
}
} ?: EMPTY_CLOSEABLE
}
@MainThread
override fun takePhoto(onAvailable: (Bitmap) -> Unit) {
takePhoto(Consumer(onAvailable))
}
@MainThread
fun takePhoto(onAvailable: Consumer<Bitmap> = Consumer {}) {
imageCapture?.run {
val enableFlashAndFocus =
if (flashConfiguration is AllowsCameraFlash.FlashConfiguration.Enabled) {
flashMode = ImageCapture.FLASH_MODE_ON
focusAndMeterOn(previousFocusAction)
} else {
flashMode = ImageCapture.FLASH_MODE_OFF
EMPTY_CLOSEABLE
}
takePicture(
executorService,
object : ImageCapture.OnImageCapturedCallback() {
override fun onCaptureSuccess(image: ImageProxy) {
enableFlashAndFocus.close()
lastImageProcessor.waitFor { processor ->
connectedImageProcessorInput.waitFor { (input, options) ->
val bitmap = image.use { it.toBitmap() }
val resultBitmap = processor.processBitmap(
input,
bitmap,
mirrorHorizontally = options.contains(
ImageProcessor.Input.Option.MirrorFramesHorizontally
),
mirrorVertically = options.contains(
ImageProcessor.Input.Option.MirrorFramesVertically
),
allowDownscaling = true
)
// processBitmap returns the source bitmap as a result if there is no effect applied
// and source bitmap should not be transformed. In this case, the source bitmap should
// not be recycled.
if (resultBitmap !== bitmap) {
bitmap.recycle()
}
if (resultBitmap != null) {
onAvailable.accept(resultBitmap)
}
}
}
}
}
)
}
}
@MainThread
override fun takeSnapshot(onAvailable: (Bitmap) -> Unit) {
takeSnapshot(Consumer(onAvailable))
}
@MainThread
fun takeSnapshot(onAvailable: Consumer<Bitmap> = Consumer {}) {
previewOutput?.run {
var enableFlashAndFocus = EMPTY_CLOSEABLE
if (flashConfiguration is AllowsCameraFlash.FlashConfiguration.Enabled) {
camera?.let {
val focus = focusAndMeterOn(previousFocusAction)
enableFlashAndFocus = if (!facingFront) {
val flash = enableCameraFlash()
Closeable {
focus.close()
flash.close()
}
} else {
// Only focus if front facing since front facing flash should be turned on in UI
focus
}
}
}
val rotationRelativeToDisplay =
getRotationRelativeToDisplay(rotationDegrees, facingFront)
val size = processedTextureSize.captureSize(rotationRelativeToDisplay)
userOperationTask.getAndSet(
executorService.submit {
// Adding delay to allow for flash to turn on and camera to adjust before taking picture.
// Only if flash enabled and camera is back facing, UI handles front flash.
if (flashConfiguration is AllowsCameraFlash.FlashConfiguration.Enabled && !facingFront) {
// Using TimeUnit#sleep() to create delay since
// ScheduledExecutorService is not available in public API.
val flashEnabled =
flashConfiguration as AllowsCameraFlash.FlashConfiguration.Enabled
val duration = flashEnabled.delayDuration
val timeUnit = flashEnabled.delayDurationTimeUnit
timeUnit.sleep(duration)
}
lastImageProcessor.waitFor { processor ->
val bitmap = processor.toBitmap(size.width, size.height)
if (bitmap != null) {
onAvailable.accept(bitmap)
}
}
enableFlashAndFocus.close()
}
)?.cancel(true)
}
}
/**
* @since 1.8.0
*/
@MainThread
override fun focusAndMeterOn(x: Float, y: Float, viewSize: Size): Closeable {
return camera?.run {
previewOutput?.let { previewOutput ->
val windowManager =
applicationContext.getSystemService(Context.WINDOW_SERVICE) as WindowManager
val display = windowManager.defaultDisplay
val textureSize =
previewOutput.textureSize.rotatedTextureSize(previewOutput.rotationDegrees)
val viewAspectRatio = viewSize.width.toFloat() / viewSize.height.toFloat()
val cameraPreviewAspectRatio =
textureSize.width.toFloat() / textureSize.height.toFloat()
val scaleFactor = viewAspectRatio / cameraPreviewAspectRatio
var fullWidth = viewSize.width.toFloat()
var fullHeight = viewSize.height.toFloat()
if (scaleFactor < 1f) {
fullWidth /= scaleFactor
} else {
fullHeight *= scaleFactor
}
val factory =
DisplayOrientedMeteringPointFactory(display, cameraInfo, fullWidth, fullHeight)
val meteringPoint = factory.createPoint(x, y)
val focusAction = FocusMeteringAction.Builder(meteringPoint)
.disableAutoCancel()
.build()
previousFocusAction = focusAction
cameraControl.startFocusAndMetering(focusAction)
}
Closeable {
previousFocusAction = CENTER_FOCUS_POINT
cameraControl.cancelFocusAndMetering()
}
} ?: EMPTY_CLOSEABLE
}
/**
* @since 1.8.3
*/
@MainThread
fun focusAndMeterOn(focusMeteringAction: FocusMeteringAction): Closeable {
return camera?.run {
cameraControl.startFocusAndMetering(focusMeteringAction)
previousFocusAction = focusMeteringAction
Closeable {
previousFocusAction = CENTER_FOCUS_POINT
cameraControl.cancelFocusAndMetering()
}
} ?: EMPTY_CLOSEABLE
}
@MainThread
override fun zoomBy(factor: Float) {
zoomState?.let { state ->
val nextZoomRatio =
max(state.minZoomRatio, min(state.maxZoomRatio, state.zoomRatio * factor))
camera?.run {
cameraControl.setZoomRatio(nextZoomRatio)
}
}
}
/**
* @since 1.8.3
*/
@MainThread
override fun useFlashConfiguration(flashConfiguration: AllowsCameraFlash.FlashConfiguration) {
this.flashConfiguration = flashConfiguration
}
/**
* @since 1.8.3
*/
@MainThread
override fun enableCameraFlash(): Closeable {
return camera?.run {
cameraControl.enableTorch(true)
Closeable {
cameraControl.enableTorch(false)
}
} ?: EMPTY_CLOSEABLE
}
@SuppressLint("RestrictedApi") // no other way to get camera info from CameraX
@ExperimentalCamera2Interop
private fun createSurfaceProviderFor(
cameraSelector: CameraSelector,
previewRequest: PreviewRequest,
) = Preview.SurfaceProvider { surfaceRequest ->
Timber.i("vxcvxcv 11")
if (this.activePreviewRequest !== previewRequest) {
Log.w(TAG, "Concurrent start camera preview requests.")
surfaceRequest.willNotProvideSurface()
return@SurfaceProvider
}
Timber.i("vxcvxcv 22")
val cameraProvider = cameraProvider
if (cameraProvider == null) {
Log.w(TAG, "No camera provider present to get camera info.")
surfaceRequest.willNotProvideSurface()
return@SurfaceProvider
}
Timber.i("vxcvxcv 33")
val cameraInfo = cameraSelector.filter(cameraProvider.availableCameraInfos).firstOrNull()
if (cameraInfo == null) {
Log.w(TAG, "Could not find camera info that matches the camera selector.")
surfaceRequest.willNotProvideSurface()
return@SurfaceProvider
}
Timber.i("vxcvxcv 44")
val cameraId = Camera2CameraInfo.from(cameraInfo).cameraId
val cameraManager: CameraManager = applicationContext
.getSystemService(Context.CAMERA_SERVICE) as CameraManager
val characteristics = cameraManager.getCameraCharacteristics(cameraId)
val fieldOfView = characteristics.fieldOfView(DEFAULT_FIELD_OF_VIEW)
val cropRect = preview?.viewPortCropRect
val resolution = CameraResolutionPreset.P1920x1080
Timber.i("vxcvxcv 55")
previewOutput = PreviewOutput(
previewRequest.configuration.facingFront,
Size(resolution.width, resolution.height),
cropRect,
cameraInfo.sensorRotationDegrees
)
Timber.i("vxcvxcv 66")
val surfaceTexture = SurfaceTexture(0).apply {
setDefaultBufferSize(resolution.width, resolution.height)
detachFromGLContext()
}
Timber.i("vxcvxcv 77")
// surfaceTexture.setOnFrameAvailableListener {
// surfaceTexture.updateTexImage()
// Timber.i("vxcvxcv")
// }
Timber.i("vxcvxcv 88")
surface = Surface(surfaceTexture)
surfaceRequest.provideSurface(surface!!, mainExecutor) {
surface!!.release()
surfaceTexture.release()
}
// rtmpService?.startPreview(surfaceRequest.deferrableSurface.surface)
val inputOptions = if (cropRect == null) {
previewRequest.inputOptions
} else {
previewRequest.inputOptions + ImageProcessor.Input.Option.Crop.Center(
cropRect.width(),
cropRect.height()
)
}
Timber.i("vxcvxcv 99")
tryConnect(
ImageProcessor.Input(
surfaceTexture,
resolution.width,
resolution.height,
cameraInfo.getSensorRotationDegrees(applicationContext.displayRotation),
previewRequest.configuration.facingFront,
{ applyZoomRatio(fieldOfView.width) },
{ applyZoomRatio(fieldOfView.height) }
),
inputOptions
)
}
private fun tryConnect(input: ImageProcessor.Input, options: Set<ImageProcessor.Input.Option>) {
try {
waitingForImageProcessorTask.getAndSet(
executorService.submit {
lastImageProcessor.waitFor { processor ->
imageProcessorInputConnection.getAndSet(
processor.connectInput(
input,
options
)
)?.close()
connectedImageProcessorInput.set(input to options)
}
}
)?.cancel(true)
} catch (e: RejectedExecutionException) {
Log.w(
TAG,
"Could not connect new Input to ImageProcessor due to ExecutorService shutdown", e
)
}
}
private fun getRotationRelativeToDisplay(rotationDegrees: Int, facingFront: Boolean): Int {
val displayRotation = surfaceRotationToDegrees(applicationContext.displayRotation)
return getRelativeImageRotation(
displayRotation,
rotationDegrees,
!facingFront
)
}
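// Effective field of view narrows with zoom: fov' = 2 * atan(tan(fov / 2) / zoomRatio).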
private fun applyZoomRatio(fov: Float): Float {
return zoomState?.zoomRatio?.let { zoomRatio ->
(2 * toDegrees(atan(tan(toRadians(fov / 2.toDouble())) / zoomRatio))).toFloat()
} ?: fov
}
}
private val DEFAULT_FIELD_OF_VIEW = SizeF(59f, 42f)
private data class PreviewRequest(
val configuration: AllowsCameraPreview.Configuration,
val inputOptions: Set<ImageProcessor.Input.Option>,
)
@AspectRatio.Ratio
private fun configurationRatioToCameraXRatio(configurationRatio: com.snap.camerakit.support.camera.AspectRatio): Int {
return when (configurationRatio) {
com.snap.camerakit.support.camera.AspectRatio.RATIO_16_9 -> {
AspectRatio.RATIO_16_9
}
com.snap.camerakit.support.camera.AspectRatio.RATIO_4_3 -> {
AspectRatio.RATIO_4_3
}
}
}
private data class PreviewOutput(
val facingFront: Boolean,
val textureSize: Size,
val textureCrop: Rect?,
val rotationDegrees: Int
)
private val PreviewOutput.processedTextureSize: Size
get() = textureCrop?.run {
Size(width(), height())
} ?: textureSize
private fun CameraCharacteristics.fieldOfView(defaultFieldOfView: SizeF): SizeF {
val focalLengths = get(CameraCharacteristics.LENS_INFO_AVAILABLE_FOCAL_LENGTHS)
val reportedSensorSize = get(CameraCharacteristics.SENSOR_INFO_PHYSICAL_SIZE)
if (focalLengths == null || focalLengths.isEmpty() || reportedSensorSize == null) {
return defaultFieldOfView
}
val sensorSize = SizeF(reportedSensorSize.width, reportedSensorSize.height)
return fieldOfViewDegrees(sensorSize, focalLengths[0], defaultFieldOfView)
}
private fun fieldOfViewDegrees(
sensorSizeMillimeters: SizeF,
focalLengthMillimeters: Float,
defaultDegrees: SizeF
): SizeF {
if (focalLengthMillimeters <= 0f ||
sensorSizeMillimeters.width <= 0f ||
sensorSizeMillimeters.height <= 0f
) {
return defaultDegrees
}
return SizeF(
(
2 * Math.toDegrees(
atan2(
(sensorSizeMillimeters.width / 2).toDouble(),
focalLengthMillimeters.toDouble()
)
)
).toFloat(),
(
2 * Math.toDegrees(
atan2(
(sensorSizeMillimeters.height / 2).toDouble(),
focalLengthMillimeters.toDouble()
)
)
).toFloat()
)
}
private val Context.displayRotation: Int
get() {
return (getSystemService(Context.WINDOW_SERVICE) as? WindowManager)?.defaultDisplay?.rotation
?: Surface.ROTATION_0
}
private fun surfaceRotationToDegrees(rotation: Int): Int {
return when (rotation) {
Surface.ROTATION_0 -> 0
Surface.ROTATION_90 -> 90
Surface.ROTATION_180 -> 180
Surface.ROTATION_270 -> 270
else -> throw IllegalArgumentException("Unsupported surface rotation: $rotation")
}
}
private fun getRelativeImageRotation(
destRotationDegrees: Int,
sourceRotationDegrees: Int,
isOppositeFacing: Boolean
): Int {
return if (isOppositeFacing) {
(sourceRotationDegrees - destRotationDegrees + 360) % 360
} else {
(sourceRotationDegrees + destRotationDegrees) % 360
}
}
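// Decodes a single-plane (JPEG) ImageCapture frame into a Bitmap.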
private fun ImageProxy.toBitmap(): Bitmap {
val buffer = planes[0].buffer
val bytes = ByteArray(buffer.capacity())
buffer[bytes]
return BitmapFactory.decodeByteArray(bytes, 0, bytes.size)
}
@SuppressLint("RestrictedApi")
internal class LenientLensFacingCameraFilter(@LensFacing lensFacing: Int) :
LensFacingCameraFilter(lensFacing) {
override fun filter(cameraInfos: List<CameraInfo>): List<CameraInfo> {
val filtered = super.filter(cameraInfos)
return if (filtered.isEmpty()) {
cameraInfos
} else {
filtered
}
}
}
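
One approach I'm considering, instead of copying analyzer buffers: let CameraKit render its processed frames into a Surface owned by the stream's video source, so the encoder only ever sees post-lens frames. A minimal sketch, assuming RootEncoder's VideoSource base class (the same one BufferVideoSource extends, with create/start/stop/release and a SurfaceTexture handed to start) and an outputFrom(Surface) factory in com.snap.camerakit — both names should be checked against the installed versions:

class SnapVideoSource : VideoSource() {
    private var processor: ImageProcessor? = null
    private var connection: Closeable? = null

    fun setProcessor(imageProcessor: ImageProcessor) {
        processor = imageProcessor
    }

    override fun create(width: Int, height: Int, fps: Int, rotation: Int): Boolean = true

    override fun start(surfaceTexture: SurfaceTexture) {
        // CameraKit renders lens-processed frames straight into the
        // SurfaceTexture the encoder pipeline hands us.
        connection = processor?.connectOutput(outputFrom(Surface(surfaceTexture)))
    }

    override fun stop() {
        connection?.close()
        connection = null
    }

    override fun release() = Unit

    override fun isRunning(): Boolean = connection != null
}

The ImageProcessor instance could be captured in attach() above and handed over before switching the stream with something like genericStream.changeVideoSource(snapVideoSource) — again, assuming that method name. The point is that the encoder consumes a processed output, not the camera's input surface.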
-
I tried to use an ImageAnalysis.Analyzer, but when forwarding the frames I get broken video and no effects.

val imageAnalyzer = ImageAnalysis.Analyzer { image ->
Timber.i("test:${image.format}")
processImage(image)
image.close()
}
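
Two things seem worth checking here. ImageAnalysis receives frames directly from the camera, before CameraKit's GPU processing, so lenses will never appear in these buffers. And if rowStride differs from width (or pixelStride from 1 on the chroma planes), a naive plane copy shears every row, which shows up as a broken picture on the RTMP side. A small probe makes the stride issue visible (a sketch using the same analyzer shape as above):

val strideProbe = ImageAnalysis.Analyzer { image ->
    image.planes.forEachIndexed { index, plane ->
        Timber.i("plane $index rowStride=${plane.rowStride} pixelStride=${plane.pixelStride} width=${image.width}")
    }
    image.close()
}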
-
I don't understand the code. Why are you using both CameraX and Snap CameraKit? Is it necessary to work with Snap CameraKit?
-
XImageProcessorSource plays the role of a VideoSource from your library. XImageProcessorSource here is a CameraX implementation, but I can't connect it to RTMP.
-
Hi, once again I would like to thank you for your hard work, and thanks for implementing the DeepAR effects; I'm still struggling with the Snap Camera Kit, though.
There is an implementation, CameraXImageProcessorSource in https://github.com/Snapchat/camera-kit-android-sdk, which sets up the camera. My question: can I use an ImageAnalysis.Analyzer to get frames that are already processed and carry the effects, or do I only get the raw camera data?
I'm trying to send a ByteArray, but the broadcast picture comes out broken, and I can't tell whether the effects are present or whether I'm only broadcasting what the camera itself captures.
If you can point me in the right direction, I would be very grateful.
Any help would be appreciated,
best regards
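
One direction that might answer this: ImageAnalysis only ever sees raw camera frames, so to read frames after the lens is applied, an ImageReader surface could be connected as an output of the ImageProcessor instead. A sketch, assuming the outputFrom(Surface) factory in com.snap.camerakit (the exact factory name and the delivered pixel format should be verified against the SDK version):

val reader = ImageReader.newInstance(1920, 1080, PixelFormat.RGBA_8888, 3)
reader.setOnImageAvailableListener({ r ->
    r.acquireLatestImage()?.use { image ->
        // This frame already has the lens applied; convert it (e.g. RGBA to NV21)
        // before handing it to the encoder's buffer source.
    }
}, Handler(Looper.getMainLooper()))
val connection = processor.connectOutput(outputFrom(reader.surface))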