Creating a smart camera app for Android involves integrating the CameraX API (a modern, easy-to-use camera library) and adding smart features such as object detection, face detection, or barcode scanning with ML Kit. Below is a step-by-step guide and code for a basic smart camera app.
1. Set Up Dependencies (build.gradle)
Declare the camera permission (<uses-permission android:name="android.permission.CAMERA" />) in AndroidManifest.xml, then add the following dependencies to your module-level build.gradle file:
dependencies {
    // CameraX
    implementation "androidx.camera:camera-core:1.3.0"
    implementation "androidx.camera:camera-camera2:1.3.0"
    implementation "androidx.camera:camera-lifecycle:1.3.0"
    implementation "androidx.camera:camera-view:1.3.0"

    // ML Kit for face detection
    implementation 'com.google.mlkit:face-detection:16.1.5'

    // Permissions helper (optional; the code below uses the Activity Result API directly)
    implementation 'com.vmadalin:easypermissions-ktx:1.0.0'
}
2. XML Layout (activity_camera.xml)
Create a layout file for the camera preview and UI elements:
<?xml version="1.0" encoding="utf-8"?>
<RelativeLayout xmlns:android="http://schemas.android.com/apk/res/android"
    android:layout_width="match_parent"
    android:layout_height="match_parent">

    <!-- Camera Preview -->
    <androidx.camera.view.PreviewView
        android:id="@+id/previewView"
        android:layout_width="match_parent"
        android:layout_height="match_parent" />

    <!-- Capture Button -->
    <Button
        android:id="@+id/captureButton"
        android:layout_width="wrap_content"
        android:layout_height="wrap_content"
        android:text="Capture"
        android:layout_alignParentBottom="true"
        android:layout_centerHorizontal="true"
        android:layout_marginBottom="16dp" />

    <!-- Face Detection Overlay -->
    <ImageView
        android:id="@+id/faceOverlay"
        android:layout_width="match_parent"
        android:layout_height="match_parent"
        android:visibility="invisible"
        android:scaleType="matrix" />

</RelativeLayout>
3. Kotlin Code (CameraActivity.kt)
Implement the camera preview, photo capture, and face detection:
import android.Manifest
import android.annotation.SuppressLint
import android.graphics.Bitmap
import android.graphics.BitmapFactory
import android.graphics.Matrix
import android.graphics.RectF
import android.os.Bundle
import android.util.Size
import android.widget.Button
import android.widget.ImageView
import androidx.activity.result.contract.ActivityResultContracts
import androidx.appcompat.app.AppCompatActivity
import androidx.camera.core.Camera
import androidx.camera.core.CameraSelector
import androidx.camera.core.ImageCapture
import androidx.camera.core.ImageCaptureException
import androidx.camera.core.Preview
import androidx.camera.lifecycle.ProcessCameraProvider
import androidx.camera.view.PreviewView
import androidx.core.content.ContextCompat
import com.google.common.util.concurrent.ListenableFuture
import com.google.mlkit.vision.common.InputImage
import com.google.mlkit.vision.face.Face
import com.google.mlkit.vision.face.FaceDetection
import com.google.mlkit.vision.face.FaceDetectorOptions
import java.io.File
import java.util.concurrent.ExecutorService
import java.util.concurrent.Executors
class CameraActivity : AppCompatActivity() {

    private lateinit var previewView: PreviewView
    private lateinit var captureButton: Button
    private lateinit var faceOverlay: ImageView
    private lateinit var cameraExecutor: ExecutorService
    private lateinit var imageCapture: ImageCapture
    private lateinit var cameraProviderFuture: ListenableFuture<ProcessCameraProvider>
    private lateinit var camera: Camera

    // ML Kit face detector, configured for speed over accuracy
    private val faceDetector = FaceDetection.getClient(
        FaceDetectorOptions.Builder()
            .setPerformanceMode(FaceDetectorOptions.PERFORMANCE_MODE_FAST)
            .build()
    )

    // Register the permission launcher up front; the callback starts the camera once granted
    private val requestPermissionLauncher = registerForActivityResult(
        ActivityResultContracts.RequestPermission()
    ) { isGranted ->
        if (isGranted) {
            startCamera()
        } else {
            // Permission denied: explain why the camera is needed or disable camera features
        }
    }

    override fun onCreate(savedInstanceState: Bundle?) {
        super.onCreate(savedInstanceState)
        setContentView(R.layout.activity_camera)

        previewView = findViewById(R.id.previewView)
        captureButton = findViewById(R.id.captureButton)
        faceOverlay = findViewById(R.id.faceOverlay)
        cameraExecutor = Executors.newSingleThreadExecutor()

        // Request the camera permission; if it is already granted, the callback fires immediately
        requestPermissionLauncher.launch(Manifest.permission.CAMERA)

        // Capture button click listener
        captureButton.setOnClickListener {
            takePhoto()
        }
    }

    // Set up the camera once permission has been granted
    private fun startCamera() {
        cameraProviderFuture = ProcessCameraProvider.getInstance(this)
        cameraProviderFuture.addListener({
            val cameraProvider = cameraProviderFuture.get()
            bindCameraUseCases(cameraProvider)
        }, ContextCompat.getMainExecutor(this))
    }
    private fun bindCameraUseCases(cameraProvider: ProcessCameraProvider) {
        // Preview use case renders the camera feed into the PreviewView
        val preview = Preview.Builder().build().also {
            it.setSurfaceProvider(previewView.surfaceProvider)
        }
        imageCapture = ImageCapture.Builder().build()
        val cameraSelector = CameraSelector.DEFAULT_BACK_CAMERA
        try {
            // Unbind any previous use cases before rebinding
            cameraProvider.unbindAll()
            camera = cameraProvider.bindToLifecycle(
                this, cameraSelector, preview, imageCapture
            )
        } catch (e: Exception) {
            e.printStackTrace()
        }
    }
    private fun takePhoto() {
        // Save into the app-specific media directory (fall back to internal storage if unavailable)
        val photoFile = File(
            externalMediaDirs.firstOrNull() ?: filesDir,
            "${System.currentTimeMillis()}.jpg"
        )
        val outputOptions = ImageCapture.OutputFileOptions.Builder(photoFile).build()
        imageCapture.takePicture(
            outputOptions,
            ContextCompat.getMainExecutor(this),
            object : ImageCapture.OnImageSavedCallback {
                override fun onImageSaved(output: ImageCapture.OutputFileResults) {
                    // Analyze the captured image for faces
                    val bitmap = BitmapFactory.decodeFile(photoFile.absolutePath)
                    detectFaces(bitmap)
                }

                override fun onError(exception: ImageCaptureException) {
                    exception.printStackTrace()
                }
            }
        )
    }
    @SuppressLint("UnsafeOptInUsageError")
    private fun detectFaces(bitmap: Bitmap) {
        // rotationDegrees = 0: no extra rotation is applied to the decoded bitmap
        val image = InputImage.fromBitmap(bitmap, 0)
        faceDetector.process(image)
            .addOnSuccessListener { faces ->
                // Draw face bounding boxes on the overlay
                drawFaceOverlay(faces, bitmap)
            }
            .addOnFailureListener { e ->
                e.printStackTrace()
            }
    }

    private fun drawFaceOverlay(faces: List<Face>, bitmap: Bitmap) {
        // Bounding boxes are in the captured bitmap's coordinate space
        val overlayBitmap = Bitmap.createBitmap(bitmap.width, bitmap.height, Bitmap.Config.ARGB_8888)
        val canvas = android.graphics.Canvas(overlayBitmap)
        val paint = android.graphics.Paint().apply {
            color = android.graphics.Color.RED
            style = android.graphics.Paint.Style.STROKE
            strokeWidth = 5f
        }
        for (face in faces) {
            val bounds = face.boundingBox
            canvas.drawRect(bounds, paint)
        }
        faceOverlay.setImageBitmap(overlayBitmap)
        faceOverlay.visibility = android.view.View.VISIBLE
    }
    override fun onDestroy() {
        super.onDestroy()
        cameraExecutor.shutdown()
        // Release ML Kit resources
        faceDetector.close()
    }
}
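takePhoto() above writes into the app-specific media directory. If you would rather write straight into the shared MediaStore (the "Save to Gallery" feature listed below), CameraX also accepts MediaStore-backed output options. Here is a minimal sketch, assuming API 29+ (so no storage permission is needed) and the extra imports android.content.ContentValues and android.provider.MediaStore; the method name and album path are illustrative:
    // Sketch: save the capture into MediaStore so it appears in the gallery (API 29+)
    private fun takePhotoToGallery() {
        val contentValues = ContentValues().apply {
            put(MediaStore.MediaColumns.DISPLAY_NAME, "${System.currentTimeMillis()}.jpg")
            put(MediaStore.MediaColumns.MIME_TYPE, "image/jpeg")
            put(MediaStore.MediaColumns.RELATIVE_PATH, "Pictures/SmartCamera") // illustrative album
        }
        val outputOptions = ImageCapture.OutputFileOptions.Builder(
            contentResolver,
            MediaStore.Images.Media.EXTERNAL_CONTENT_URI,
            contentValues
        ).build()

        imageCapture.takePicture(
            outputOptions,
            ContextCompat.getMainExecutor(this),
            object : ImageCapture.OnImageSavedCallback {
                override fun onImageSaved(output: ImageCapture.OutputFileResults) {
                    // output.savedUri points at the new MediaStore entry
                }

                override fun onError(exception: ImageCaptureException) {
                    exception.printStackTrace()
                }
            }
        )
    }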
4. Key Features:
- CameraX Integration: Modern camera API for preview and capture.
- Face Detection: Uses ML Kit to detect faces in captured images.
- Permissions: Handles runtime camera permissions.
- Overlay: Draws bounding boxes around detected faces.
5. Advanced Features:
- Real-Time Face Detection: Use ImageAnalysis in CameraX to detect faces in real time (see section 6).
- Barcode Scanning: Integrate ML Kit’s barcode scanning API (see the sketch after this list).
- Object Detection: Use TensorFlow Lite for custom object detection.
- Filters: Apply GPU-based filters using libraries like GPUImage.
- Save to Gallery: Save captured images to the device gallery.
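As a starting point for the barcode item above, here is a minimal sketch of a CameraX analyzer that feeds frames to ML Kit's barcode scanner. It assumes you add the com.google.mlkit:barcode-scanning dependency (version shown is an assumption; check the current release) plus imports for BarcodeScanning and ImageAnalysis:
// Sketch: ML Kit barcode scanning inside an ImageAnalysis analyzer
// Assumes: implementation 'com.google.mlkit:barcode-scanning:17.2.0' (check the current version)
val barcodeScanner = BarcodeScanning.getClient()

val barcodeAnalysis = ImageAnalysis.Builder().build().also {
    it.setAnalyzer(cameraExecutor) { imageProxy ->
        // Note: imageProxy.image is an opt-in API; annotate the enclosing method with
        // @SuppressLint("UnsafeOptInUsageError") or @ExperimentalGetImage
        val mediaImage = imageProxy.image
        if (mediaImage != null) {
            val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
            barcodeScanner.process(image)
                .addOnSuccessListener { barcodes ->
                    for (barcode in barcodes) {
                        // rawValue holds the decoded payload (null for unsupported formats)
                        android.util.Log.d("Barcode", barcode.rawValue ?: "")
                    }
                }
                .addOnFailureListener { e -> e.printStackTrace() }
                .addOnCompleteListener { imageProxy.close() } // release the frame when done
        } else {
            imageProxy.close()
        }
    }
}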
6. Real-Time Face Detection:
To detect faces in real time, add an ImageAnalysis use case (it can be bound alongside Preview and ImageCapture):
val imageAnalysis = ImageAnalysis.Builder()
    .setTargetResolution(Size(1280, 720))
    .build()
    .also {
        it.setAnalyzer(cameraExecutor) { imageProxy ->
            // Note: imageProxy.image is an opt-in API; annotate the enclosing method with
            // @SuppressLint("UnsafeOptInUsageError") or @ExperimentalGetImage
            val mediaImage = imageProxy.image
            if (mediaImage != null) {
                val image = InputImage.fromMediaImage(mediaImage, imageProxy.imageInfo.rotationDegrees)
                faceDetector.process(image)
                    .addOnSuccessListener { faces ->
                        // Update UI with detected faces
                    }
                    .addOnFailureListener { e ->
                        e.printStackTrace()
                    }
                    .addOnCompleteListener {
                        // Close the frame only after ML Kit has finished with it
                        imageProxy.close()
                    }
            } else {
                imageProxy.close()
            }
        }
    }
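The analyzer only runs once the ImageAnalysis use case is bound to the lifecycle. A minimal sketch of the updated binding inside bindCameraUseCases(), assuming the existing preview, imageCapture, and cameraSelector are still in scope:
// Bind the analyzer together with the other use cases
cameraProvider.unbindAll()
camera = cameraProvider.bindToLifecycle(
    this, cameraSelector, preview, imageCapture, imageAnalysis
)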
This is a foundational example. You can expand it with more advanced features like AR effects, AI-based enhancements, or custom filters. Let me know if you need help with any specific feature! 🚀