/*---------------------------------------------------------------------------*
  Project:  Horizon
  File:     main.cpp

  Copyright (C)2009-2012 Nintendo Co., Ltd.  All rights reserved.

  These coded instructions, statements, and computer programs contain
  proprietary information of Nintendo of America Inc. and/or Nintendo
  Company Ltd., and are protected by Federal copyright law.  They may not
  be disclosed to third parties or copied or duplicated in any form, in
  whole or in part, without the prior written consent of Nintendo.

  $Rev: 53198 $
 *---------------------------------------------------------------------------*/

// NOTE: The system header names below are an assumption, reconstructed from
// the nn::os, nn::fnd, nn::hid, nn::camera, and nn::y2r APIs used in this file.
#include <nn.h>
#include <nn/os.h>
#include <nn/fnd.h>
#include <nn/hid.h>
#include <nn/camera.h>
#include <nn/y2r.h>

#include "demo.h"
#include "demo/Render/demo_RenderSystemExt.h"
#include "applet.h"

#define debug_print NN_LOG
//#define debug_print(...) ((void)0)

//=============================================================================
namespace
{
    // ----------------------------------------
    // Sample initialization and finalization
    void Initialize(void);
    void Finalize(void);

    // ----------------------------------------
    // Functions for applet control
    void InitializeApplet(void);
    void FinalizeApplet(void);

    // ----------------------------------------
    // Functions for camera thread control
    void InitializeCameraThread(void);
    void FinalizeCameraThread(void);
    void PrepareTransitionCallback(void);
    void AfterTransitionCallback(void);

    // ----------------------------------------
    // Camera/Y2R control functions
    void SetupCamera(void);
    bool UpdateCamera(void);
    nn::Result InitializeCameraSetting(void);
    void InitializeY2r(void);
    void InitializeResource(void);
    void FinalizeResource(void);
    void CameraThreadFunc(uptr param);
    void CameraRecvFunc(nn::camera::Port port);
    void CameraVsyncFunc(nn::camera::Port port);
    void StopCameraCapture(void);
    void RestartCameraCapture(void);
    bool Y2rConversion(s32 index);

    // ----------------------------------------
    // Rendering functions
    int InitializeGx(void);
    void FinalizeGx(void);
    void LoadObjects(void);
    void DeleteObjects(void);
    void ReadyObjects(void);
    void DrawCameraImage(s32 index);
    void DrawDisplay0Stereo(void);
    void DrawDisplay0(void);
    void DrawDisplay1(void);
    void DrawDisplay0Ext(void);
    void DrawFrame(void);
    void SetTextureCombiner(void);
    void SetCalibration(void);
}

//=============================================================================
/* Please see man pages for details */
namespace
{
    // Dynamic allocation heap
    nn::fnd::ExpHeap s_AppHeap;

    // Indexes for the cameras used in the sample
    enum CameraIndex
    {
        CAMERA_RIGHT,   // Right camera
        CAMERA_LEFT,    // Left camera
        CAMERA_NUM      // Number of cameras
    };

    // Buffers into which camera images are transferred
    const s32 YUV_BUFFER_NUM = 3;  // Number of YUV buffers per camera (triple buffering)
    u8* s_paaYuvBuffer[CAMERA_NUM][YUV_BUFFER_NUM];  // Buffers to which YUV data is transferred from the cameras (number of cameras * triple buffer)
    s32 s_YuvBufferSize;                    // YUV buffer size
    s32 s_aYuvCapturing[CAMERA_NUM];        // Index of the buffer currently receiving a camera image
    s32 s_aYuvLatestCaptured[CAMERA_NUM];   // Index of the buffer whose transfer completed most recently
    s32 s_aYuvReading[CAMERA_NUM];          // Index of the buffer being read during Y2R conversion
    nn::os::CriticalSection s_aCsYuvSwap[CAMERA_NUM];  // Locks used when swapping buffers
    s32 s_YuvTransferUnit;  // Amount of camera image data transferred at a time
    nn::y2r::StandardCoefficient s_Coefficient;  // The Y2R conversion coefficient suited to the data output by the camera
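
    // Illustration of how the three buffer indices above rotate (a note added
    // for clarity; not part of the original sample): with YUV_BUFFER_NUM == 3,
    // one buffer is always being written by the camera (s_aYuvCapturing), one
    // holds the most recently completed frame (s_aYuvLatestCaptured), and one
    // may be locked for reading by Y2R (s_aYuvReading). After each transfer
    // completes, "capturing" advances and skips the buffer being read:
    //
    //   capturing:       0 -> 1 -> 2 -> 0 -> ...  (skipping the "reading" index)
    //   latestCaptured:  the previous "capturing" value
    //   reading:         set to "latestCaptured" when Y2R picks up a frame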

    // The buffers into which RGB images are transferred after Y2R conversion
    u8* s_paRgbBuffer[CAMERA_NUM] = { NULL, NULL };  // Space for both the left and right images
    s32 s_RgbBufferSize;  // RGB buffer size

    // Indicates whether a valid RGB image exists
    bool s_ExistRgbImage[CAMERA_NUM] = { false, false };
    // Indicates whether the RGB image has been updated
    bool s_IsUpdateRgbImage[CAMERA_NUM] = { false, false };

    // The width of an image before it is trimmed
    s32 s_OriginalWidth;
    // The size of an image after it is trimmed
    s32 s_TrimmingWidth;
    s32 s_TrimmingHeight;
    // The size of the image to be pasted as a texture
    s32 s_TextureWidth;
    s32 s_TextureHeight;

    // V-Blank timing
    nn::fnd::TimeSpan saa_LatestVsyncTiming[CAMERA_NUM][2];
    s64 s_VsyncTimingDifference = 0;

    // V-Blank intervals (used to calculate the frame rate)
    const u8 VSYNC_INTERVAL_LOG_NUM = 4;
    s64 saa_VsyncInterval[CAMERA_NUM][VSYNC_INTERVAL_LOG_NUM] =
        { { 0, 0, 0, 0 }, { 0, 0, 0, 0 } };
    s32 s_aVsyncIntervalPos[CAMERA_NUM] = { 0, 0 };
    s64 s_aFps[CAMERA_NUM] = { 0, 0 };

    // Number of camera frames
    s32 s_CameraFrameCount = 0;
    const s32 FRAME_NUM_FOR_STABILIZE = 30;

    // The thread that processes camera events
    nn::os::Thread s_CameraThread;
    // Priority of the main thread
    s32 s_MainThreadPriority;

    // Notification from the camera library that a data transfer is complete
    nn::os::Event s_aCameraRecvEvent[CAMERA_NUM];
    // Notification from the camera library that there was a buffer error
    nn::os::Event s_aCameraBufferErrorEvent[CAMERA_NUM];
    // Notification from the camera library that there was a V-Blank
    nn::os::Event s_aCameraVsyncEvent[CAMERA_NUM];
    // Notification from the Y2R library that conversion is complete
    nn::os::Event s_Y2rEndEvent;

    // Queue used to collect Y2R conversion requests for camera images.
    // The appropriate queue length depends on the balance between the cameras'
    // frame rate and the time taken to process Y2R conversion requests.
    nn::os::BlockingQueue s_Y2rRequestQueue;
    const s32 Y2R_REQUEST_QUEUE_LENGTH = 8;
    uptr s_aY2rRequestQueueMessage[Y2R_REQUEST_QUEUE_LENGTH];

    // Format of a Y2R conversion request
    struct Y2rRequest_st
    {
        s32          index;  // Right or left
        NN_PADDING4;
        nn::os::Tick time;   // Time at which the image was obtained
    }; //struct Y2rRequest_st

    // Circular buffer for holding Y2R conversion requests
    const s32 Y2R_REQUEST_BUFFER_MAX = 3;
    Y2rRequest_st saa_Y2rRequestBuffer[CAMERA_NUM][Y2R_REQUEST_BUFFER_MAX];
    s32 s_aY2rRequestBufferCounter[CAMERA_NUM];

    // Flag used to stop the camera thread
    bool s_IsCameraThreadEnd = false;
    // Flag used to stop processing in the camera thread during Sleep Mode
    bool s_IsCameraThreadSleep = false;
    // Event used to wait for the camera thread to sleep
    nn::os::LightEvent s_CameraThreadSleepAckEvent;
    // Event used to send a notification when the camera thread wakes up
    nn::os::LightEvent s_CameraThreadAwakeEvent;

    // Indicates whether camera capture should be running
    bool s_IsCameraCaptureEnable = true;
    // Indicates whether camera capture is actually running
    bool s_IsCameraCaptureStarted = false;
    // Indicates whether the cameras should be active
    bool s_IsCameraActiveTarget = true;
    // Indicates whether the cameras are actually active
    bool s_IsCameraActive = false;
    // Indicates whether camera Vsync synchronization is necessary
    bool s_IsNeedSynchronizeVsyncTiming = false;
    // Indicates that the cameras have finished being configured
    bool s_IsFinishCameraSetting = false;

    // Calibration data for the stereo cameras
    nn::camera::StereoCameraCalibrationData s_CalData;
    // Left/right correction matrices
    nn::math::MTX34 s_aCalMatrix[CAMERA_NUM];
    // Image magnification ratio produced by the correction process
    f32 s_CalScale = 0.0f;
} //Namespace

//=============================================================================
/* Please see man pages for details */
namespace
{
    // 3D-enabled demo framework
    demo::RenderSystemExt s_RenderSystem;

    // Size of the FCRAM memory block allocated for graphics
    const s32 MEMORY_SIZE_FCRAM_GX = 0x800000;
    uptr s_AddrForGxHeap = 0;

    // Buffer IDs
    GLuint s_ArrayBufferID = 0;
    GLuint s_ElementArrayBufferID = 0;
    // Program ID
    GLuint s_ProgramID = 0;
    // Shader ID
    GLuint s_ShaderID = 0;
    // Textures
    GLuint s_Texture[CAMERA_NUM] = { 0, 0 };

    // Frame count
    s32 s_FrameCount = 0;
} //Namespace

//==============================================================================
/* Please see man pages for details */
extern "C" void nnMain(void)
{
    nn::Result result;

    /* Please see man pages for details */
    NN_LOG("Camera demo start\n");

    // Perform initialization.
    // In this demo, the camera thread is started first, and then everything is
    // done in the camera thread, including starting/stopping the cameras,
    // starting/stopping capture, and changing the settings.
    Initialize();

    NN_LOG("Camera Demo: start\n");
    NN_LOG("Y button    : Stop/Restart capturing\n");
    NN_LOG("X button    : Deactivate/Activate camera\n");
    NN_LOG("Start button: Finalize camera library\n");

    nn::hid::PadReader padReader;
    nn::hid::PadStatus padStatus;

    // Flags indicating whether Y2R conversion of each camera image has completed.
    bool a_IsComplete[CAMERA_NUM] = { false, false };
    // The times at which the YUV images whose Y2R conversion completed were obtained.
    nn::os::Tick a_LatestTime[CAMERA_NUM];

    bool isLoop = true;
    // Counts the number of images that were thrown out because they were out
    // of sync or for other reasons.
    s32 throwCount = 0;

    while (isLoop)
    {
        // ----------------------------------------
        // Applet-related processing.
        // The system forces the Y2R library to shut down when it transitions to
        // Sleep Mode. On the other hand, conversion is not resumed when the
        // system recovers from Sleep Mode. We therefore recommend transitioning
        // to Sleep Mode only after Y2R conversion of a single image has finished.
        TransitionHandler::Process();
        if (TransitionHandler::IsExitRequired())
        {
            break;  // Exit the main loop if the application ends
        }

        // Get gamepad values
        padReader.ReadLatest(&padStatus);

        /* Please see man pages for details */
        // Flag for skipping the wait for Vsync when no rendering is performed.
        bool skipWaitVsync = true;

        // There is no reason to perform Y2R conversion when no camera images are
        // obtained, so wait for a Y2R conversion request from the camera thread.
        uptr msg;
        if (s_IsFinishCameraSetting && s_Y2rRequestQueue.TryDequeue(&msg))
        {
            // Run Y2R conversion.
            Y2rRequest_st* p_Req = reinterpret_cast<Y2rRequest_st*>(msg);

            // Check the times at which the two images were obtained. If there is
            // a significant difference between them, throw out one of the frames
            // under the assumption that the times do not belong to the same frame.
            s32 another = p_Req->index ^ 1;
            if (a_IsComplete[another])
            {
                s64 diff = (p_Req->time - a_LatestTime[another]).ToTimeSpan().GetMilliSeconds();
                if ((diff > 3) || (diff < -3))
                {
                    NN_LOG("Throw out another frame (diff=%lld msec)\n", diff);
                    a_IsComplete[another] = false;
                    throwCount++;
                }
                else
                {
                    throwCount = 0;
                }

                // Retry when VSync has not run properly.
                // This is normally impossible, but it is handled just in case.
                if (throwCount >= 10)
                {
                    // Request that the camera thread synchronize the camera-capture timing.
                    s_IsNeedSynchronizeVsyncTiming = true;
                    // Ask the camera thread to resume data capture and transfer.
                    // We use a buffer error event because this is processed just
                    // like a recovery from a buffer error.
                    s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();
                    NN_LOG("Retry SynchronizeVsyncTiming\n");
                }
            }

            // Run Y2R conversion
            if (Y2rConversion(p_Req->index))
            {
                // Y2R conversion was successful, so update the flag and the time.
                a_LatestTime[p_Req->index] = p_Req->time;
                a_IsComplete[p_Req->index] = true;
                // The texture needs to be updated.
                s_IsUpdateRgbImage[p_Req->index] = true;
                // The content of the RGB buffer is valid.
                s_ExistRgbImage[p_Req->index] = true;
            }
            else
            {
                // Invalidate the texture because Y2R conversion failed and the
                // content of the buffer might be corrupted.
                s_ExistRgbImage[p_Req->index] = false;
            }
        }
        else
        {
            // If no queued request was processed, wait for Vsync.
            skipWaitVsync = false;
        }
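
        // A note on the 3-millisecond tolerance above (added for clarity): the
        // stereo cameras run at roughly 15 fps, i.e. one frame every ~66 ms
        // (the same figure used for the IsBusy timeout in StopCameraCapture).
        // Left/right frames from a properly synchronized pair should differ by
        // far less than half a frame period, so a difference beyond +/-3 ms is
        // treated as a left/right mismatch rather than ordinary jitter.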

        // The following block handles stopping the cameras from taking images.
        {
            // There are three ways to stop taking camera images:
            //   (1) Stop the capture operations themselves
            //   (2) Put the camera devices to sleep (standby)
            //   (3) Shut down the camera library

            // (1) Stop the capture operations themselves.
            // Capture stops while the cameras remain active. Resuming is quick
            // because the cameras do not need to be reactivated, but this method
            // consumes more power than method (2).
            // This is optimal when capturing stops for only a brief period.
            if ((padStatus.trigger & nn::hid::BUTTON_Y) && s_IsCameraActiveTarget)
            {
                // Reconfigure the transfer settings and ask the camera thread to
                // resume/stop capture. In this sample demo, capture is
                // resumed/stopped by the camera thread.
                // Use a buffer error event because this is handled just like a
                // recovery from a buffer error.
                s_IsCameraCaptureEnable = !s_IsCameraCaptureEnable;
                s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();

                // Invalidate the images before switching
                a_IsComplete[CAMERA_LEFT] = false;
                a_IsComplete[CAMERA_RIGHT] = false;
            }
            // (2) Put the camera devices to sleep.
            // Put the camera devices to sleep (standby) and cancel image output.
            // Note that this is not the same concept of "sleep" handled by
            // nn::applet. Although this requires the cameras to be reactivated
            // afterward, it consumes less power than method (1).
            // This is optimal when capturing stops for a long period.
            else if ((padStatus.trigger & nn::hid::BUTTON_X) && s_IsCameraCaptureEnable)
            {
                // Ask the camera thread to toggle whether the cameras should be active.
                s_IsCameraActiveTarget = !s_IsCameraActiveTarget;
                // Reconfigure transfers and resume capture operations.
                // In this sample demo, processing is resumed by the camera thread.
                // Use a buffer error event because this is handled just like a
                // recovery from a buffer error.
                s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();

                // Invalidate the images before switching
                a_IsComplete[CAMERA_RIGHT] = false;
                a_IsComplete[CAMERA_LEFT] = false;
            }
            // (3) Shut down the camera library.
            // We shut down the camera library when we are not going to use it
            // anymore. To use it again, we must call camera::Initialize and then
            // reconfigure every setting.
            // Only a single process can use the camera/Y2R libraries at any given
            // time, so you must finalize the camera library before any other
            // application or applet can use the camera/Y2R libraries.
            else if ((padStatus.trigger & nn::hid::BUTTON_START)
                  && s_IsCameraCaptureEnable && s_IsCameraActiveTarget)
            {
                // Exit the loop and shut down the camera library.
                isLoop = false;
            }
        }

        // Render the left and right camera images.
        // We do not render them until we have both.
        if (a_IsComplete[CAMERA_RIGHT] && a_IsComplete[CAMERA_LEFT])
        {
            DrawFrame();
            a_IsComplete[CAMERA_RIGHT] = false;
            a_IsComplete[CAMERA_LEFT] = false;
            // If we rendered, wait for Vsync.
            skipWaitVsync = false;
        }
        // Render even when images are not being captured from the cameras.
        else if (!s_IsCameraCaptureStarted || !s_IsCameraActive)
        {
            DrawFrame();
            // If we rendered, wait for Vsync.
            skipWaitVsync = false;
        }

        if (!skipWaitVsync)
        {
            s_RenderSystem.WaitVsync(NN_GX_DISPLAY_BOTH);
        }
    } //while()

    Finalize();

    NN_LOG("Stereo Camera Demo: End\n");
    nn::applet::CloseApplication();
} //nnMain()

//=============================================================================
/* Please see man pages for details */
namespace
{
    //------------------------------------------------------------
    // Initialization
    //------------------------------------------------------------
    void Initialize(void)
    {
        nn::fs::Initialize();

        InitializeApplet();  // Applet processing
        if (TransitionHandler::IsExitRequired())
        {
            // End the application if the POWER Button is pressed during startup
            FinalizeApplet();
            nn::applet::CloseApplication();
        }

        // Heap memory
        s_AppHeap.Initialize(
            nn::os::GetDeviceMemoryAddress(),  // Start address
            nn::os::GetDeviceMemorySize());    // Memory size

        // ROMFS must be mounted before it can be used.
        const size_t ROMFS_BUFFER_SIZE = 1024 * 64;
        static char buffer[ROMFS_BUFFER_SIZE];
        NN_PANIC_IF_FAILED(nn::fs::MountRom(16, 16, buffer, ROMFS_BUFFER_SIZE));

        if (InitializeGx() < 0)
        {
            NN_PANIC("failed gx init\n");
        }

        // We want to use the gamepad, so initialize the HID library.
        NN_PANIC_IF_FAILED(nn::hid::Initialize());

        // Start the thread for camera processing
        InitializeCameraThread();
    }

    //------------------------------------------------------------
    // Finalization
    //------------------------------------------------------------
    void Finalize(void)
    {
        // Finalize the thread for camera processing.
        FinalizeCameraThread();

        nn::hid::Finalize();
        FinalizeGx();
        FinalizeApplet();
        s_AppHeap.Finalize();
    }
} //Namespace

//=============================================================================
/* Please see man pages for details */
namespace
{
    void SetupCamera()
    {
        nn::Result result;

        // Camera library initialization
        while (true)
        {
            // If the end flag is set, stop initializing and finalize instead.
            if (s_IsCameraThreadEnd)
            {
                return;
            }

            // Camera library initialization
            result = nn::camera::Initialize();
            if (result.IsSuccess())
            {
                break;
            }
            else if (result == nn::camera::ResultFatalError())
            {
                // The camera restart process failed
                NN_PANIC("Camera has broken.\n");
            }
            else if (result == nn::camera::ResultUsingOtherProcess())
            {
                // The camera is being used by another process
                NN_PANIC("Camera is used by another process.\n");
            }
            else if (result == nn::camera::ResultAlreadyInitialized())
            {
                // Initialization was already done, so nothing in particular needs to happen
                NN_LOG("Camera is already initialized.\n");
                break;
            }
            else if (result == nn::camera::ResultIsSleeping())
            {
                // Failed because the system is closed.
                // If the main thread has requested the wait state, block until it ends.
                if (s_IsCameraThreadSleep)
                {
                    s_IsCameraThreadSleep = false;
                    // Notify that the camera thread has entered the wait state (to Sleep, Home)
                    s_CameraThreadSleepAckEvent.Signal();
                    // Wait until the thread recovery signal arrives (from Sleep, Home)
                    s_CameraThreadAwakeEvent.Wait();
                }
                else
                {
                    // Even if there is no request, block for a period of time to
                    // create a retry interval.
                    nn::os::Thread::Sleep(nn::fnd::TimeSpan::FromMilliSeconds(10));
                }
                // Retry
                NN_LOG("ShellClose: Retry camera initialization\n");
            }
        }

        // Initialize the camera settings
        while (true)
        {
            // If the end flag is set, stop initializing and finalize instead.
            if (s_IsCameraThreadEnd)
            {
                nn::camera::Finalize();
                return;
            }

            result = InitializeCameraSetting();
            if (result.IsSuccess())
            {
                break;
            }
            else if (result == nn::camera::ResultFatalError())
            {
                NN_PANIC("Camera has broken.\n");
            }
            else if (result == nn::camera::ResultIsSleeping())
            {
                // Failed because the system is closed.
                // If the main thread has requested the wait state, block until it ends.
                if (s_IsCameraThreadSleep)
                {
                    s_IsCameraThreadSleep = false;
                    // Notify that the camera thread has entered the wait state (to Sleep, Home)
                    s_CameraThreadSleepAckEvent.Signal();
                    // Wait until the thread recovery signal arrives (from Sleep, Home)
                    s_CameraThreadAwakeEvent.Wait();
                }
                else
                {
                    // Even if there is no request, block for a period of time to
                    // create a retry interval.
                    nn::os::Thread::Sleep(nn::fnd::TimeSpan::FromMilliSeconds(10));
                }
                // Retry
                NN_LOG("ShellClose: Retry camera setting\n");
            }
        }

        // Render settings for correcting offsets in the camera positions
        SetCalibration();

        InitializeY2r();       // Y2R library-related initialization
        InitializeResource();  // Resource initialization

        // Initialization has completed, so raise this thread's priority above
        // the main thread's.
        nn::os::Thread::ChangeCurrentPriority(s_MainThreadPriority - 3);

        s_IsFinishCameraSetting = true;
    }

//==============================================================================
/* Please see man pages for details */
    nn::Result InitializeCameraSetting()
    {
        /* Please see man pages for details */
        /* Please see man pages for details */
        // PORT_CAM1 is the port that corresponds to the inner camera or the
        // outer camera on the right.
        // PORT_CAM2 is the port that corresponds to the outer camera on the left.
        // Use PORT_BOTH to specify both at the same time.

        // To paste an image as a texture, its dimensions must be powers of two.
        // VGA (640x480) has no power-of-two dimensions. Trimming to 512x256
        // would work, but we want to trim as little as possible, so we trim to
        // 512x384 and place the trimmed image in the center of a 512x512 texture.
        s_TrimmingWidth  = 512;
        s_TrimmingHeight = 384;
        nn::camera::SetTrimming(nn::camera::PORT_BOTH, true);
        nn::camera::SetTrimmingParamsCenter(
            nn::camera::PORT_BOTH,
            static_cast<s16>(s_TrimmingWidth),
            static_cast<s16>(s_TrimmingHeight),
            640,
            480);
        s_TextureWidth  = 512;
        s_TextureHeight = 512;

        // Specify the transfer size for image data.
        // On CTR, image data input from the camera accumulates in a FIFO, and
        // the image is transferred each time the specified number of bytes has
        // accumulated.
        // You can specify a size of up to 10240 bytes, but an error is generated
        // when the FIFO overflows, so we recommend setting half of that: 5120 bytes.
        // The GetMaxBytes function returns the maximum transfer size that
        // matches the image size without exceeding 5120 bytes.
        s_YuvTransferUnit = nn::camera::GetMaxBytes(s_TrimmingWidth, s_TrimmingHeight);
        nn::camera::SetTransferBytes(
            nn::camera::PORT_BOTH, s_YuvTransferUnit, s_TrimmingWidth, s_TrimmingHeight);
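
        // Worked example of the transfer-unit calculation (added for clarity):
        // YUV4:2:2 data is 2 bytes per pixel, so one 512-pixel line is 1024
        // bytes and a 512x384 frame is 393,216 bytes. The transfer unit must
        // fit the frame evenly while staying at or below 5120 bytes, so
        // GetMaxBytes presumably returns 4096 bytes (4 lines) for this size.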

        // Get the events used for buffer error notifications.
        // A buffer error is reported when the transfer of camera data fails
        // (when the FIFO overflows), at which point you need to take steps to
        // resume transferring and capturing data.
        nn::camera::GetBufferErrorInterruptEvent(&s_aCameraBufferErrorEvent[CAMERA_RIGHT], nn::camera::PORT_CAM1);
        nn::camera::GetBufferErrorInterruptEvent(&s_aCameraBufferErrorEvent[CAMERA_LEFT],  nn::camera::PORT_CAM2);

        // Get the events used for camera V-Blank notifications.
        // Frame rates are calculated and camera settings are changed during V-Blanks.
        nn::camera::GetVsyncInterruptEvent(&s_aCameraVsyncEvent[CAMERA_RIGHT], nn::camera::PORT_CAM1);
        nn::camera::GetVsyncInterruptEvent(&s_aCameraVsyncEvent[CAMERA_LEFT],  nn::camera::PORT_CAM2);

        // Set the image size. Here we set VGA (640x480), the maximum size the
        // camera can output.
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetSize(
            nn::camera::SELECT_OUT1_OUT2, nn::camera::SIZE_VGA, nn::camera::CONTEXT_A));
        s_OriginalWidth = 640;

        // By default, a filter that reduces noise in dark locations operates
        // automatically in the camera module, so dark images lose sharpness.
        // With the stereo cameras this can blur only one of the two images for
        // some subjects, making them hard to look at, so we recommend turning
        // the noise-reduction filter off.
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetNoiseFilter(nn::camera::SELECT_OUT1_OUT2, false));

        // Auto-exposure operates independently for the left and right stereo
        // cameras (it is not linked between the two). The degree of auto-exposure
        // is calculated from the brightness of the image. However, the scenery
        // captured by the left and right cameras differs, and because the
        // brightness of the overall image determines how auto-exposure behaves,
        // there may be significant differences in the brightness of the left
        // and right images.
        // To reduce these differences in brightness, the region captured by both
        // cameras (i.e., the right side of the region captured by the left
        // camera, and the left side of the region captured by the right camera)
        // is used as the region that determines how auto-exposure behaves.
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetAutoExposureWindow(nn::camera::SELECT_OUT1, 0,   0, 480, 480));
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetAutoExposureWindow(nn::camera::SELECT_OUT2, 160, 0, 480, 480));

        // Set the region used to determine how auto white balance works to the
        // same region used for auto-exposure. This reduces differences in color
        // caused by differences in how auto white balance behaves.
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetAutoWhiteBalanceWindow(nn::camera::SELECT_OUT1, 0,   0, 480, 480));
        NN_UTIL_RETURN_IF_FAILED(nn::camera::SetAutoWhiteBalanceWindow(nn::camera::SELECT_OUT2, 160, 0, 480, 480));

        // Get the Y2R conversion coefficient suited to the data output by the camera.
        // This value is used when initializing Y2R.
        // It is unnecessary if you are not using Y2R.
        NN_UTIL_RETURN_IF_FAILED(nn::camera::GetSuitableY2rStandardCoefficient(&s_Coefficient));

        return nn::ResultSuccess();
    } //InitializeCameraSetting()
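
    // A note on the window coordinates configured above (added for clarity):
    // SELECT_OUT1 is the right camera, and its window (0, 0, 480, 480) covers
    // the left 480 of its 640 columns; SELECT_OUT2 is the left camera, and its
    // window (160, 0, 480, 480) covers the right 480 columns. Both windows
    // therefore approximate the region visible to both cameras at once.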

    // Update the state of the cameras.
    bool UpdateCamera(void)
    {
        nn::Result result;

        /* Please see man pages for details */
        if (s_IsCameraActiveTarget && !s_IsCameraActive)
        {
            // Activate cameras that were in standby.
            // After the cameras have been activated, call StartCapture to start
            // capturing images.
            result = nn::camera::Activate(nn::camera::SELECT_OUT1_OUT2);
            if (result.IsSuccess())
            {
                s_IsCameraActive = true;
                // The VSync timing may have shifted when the cameras were
                // reactivated, so we must resynchronize it.
                s_IsNeedSynchronizeVsyncTiming = true;
                // Because the cameras have been reactivated, we need to wait for
                // auto-exposure to stabilize again.
                s_CameraFrameCount = 0;
            }
            else if (result == nn::camera::ResultIsSleeping())
            {
                // If the system is closed, a failure can occur here.
                // Retry on recovery from sleep.
                NN_LOG("ShellClose: Camera activation failed.\n");
                return false;
            }
            else if (result == nn::camera::ResultFatalError())
            {
                NN_PANIC("Camera has broken.\n");
            }
        }

        /* Please see man pages for details */
        if (!s_IsCameraActiveTarget && s_IsCameraActive)
        {
            // Transition the cameras from the active state to standby.
            // Stop capture before transitioning the cameras to standby.
            StopCameraCapture();

            // Put the cameras into standby.
            result = nn::camera::Activate(nn::camera::SELECT_NONE);
            if (result.IsSuccess())
            {
                s_IsCameraActive = false;
            }
            else if (result == nn::camera::ResultIsSleeping())
            {
                // If the system is closed, a failure can occur here.
                // Retry on recovery from sleep.
                NN_LOG("ShellClose: Camera deactivation failed.\n");
                return false;
            }
            else if (result == nn::camera::ResultFatalError())
            {
                NN_PANIC("Camera has broken.\n");
            }
        }

        // If the cameras are not active, settings cannot be configured, so the
        // update ends here.
        if (!s_IsCameraActive)
        {
            return true;
        }

        // Synchronize the timing at which the cameras capture their subjects.
        // On CTR the cameras operate independently, so even when set to the same
        // frame rate they capture images at different times.
        // If this difference in timing is large, there will be a discrepancy in
        // motion between the left and right camera images, so use the
        // SynchronizeVsyncTiming function to bring the capture timing as close
        // together as possible. With this function, timing discrepancies can be
        // reduced to approximately 100 microseconds (in the Release build).
        // This function must be called while the cameras are active.
        if (s_IsNeedSynchronizeVsyncTiming)
        {
            StopCameraCapture();
            result = nn::camera::SynchronizeVsyncTiming(nn::camera::SELECT_OUT1, nn::camera::SELECT_OUT2);
            if (result.IsSuccess())
            {
                s_IsNeedSynchronizeVsyncTiming = false;
            }
            else if (result == nn::camera::ResultIsSleeping())
            {
                // If the system is closed, a failure can occur here.
                // Retry on recovery from sleep.
                NN_LOG("ShellClose: Camera synchronization failed\n");
                return false;
            }
            else if (result == nn::camera::ResultFatalError())
            {
                NN_PANIC("Camera has broken.\n");
            }
        }

        if (s_IsCameraCaptureEnable)
        {
            // Capture stops when there is an error, so reconfigure the transfer
            // settings and resume capture operations.
            RestartCameraCapture();
        }
        else
        {
            StopCameraCapture();
        }

        return true;
    } //UpdateCamera()

//==============================================================================
/* Please see man pages for details */
    void InitializeY2r(void)
    {
        // Initialize the Y2R library
        if (!nn::y2r::Initialize())
        {
            NN_PANIC("Y2R is used by another process.\n");
        }

        // Force conversion to stop, because the library cannot be configured
        // during a conversion.
        nn::y2r::StopConversion();
        while (nn::y2r::IsBusyConversion())
        {
            nn::os::Thread::Sleep(nn::fnd::TimeSpan::FromMicroSeconds(100));
        }

        // Set the input format.
        // The image data input from the camera is in YUV4:2:2 format, which can
        // be processed in batch mode.
        nn::y2r::SetInputFormat(nn::y2r::INPUT_YUV422_BATCH);

        // Set the output format. We use 24-bit RGB here.
        // If you want conversion to complete more quickly, you need to reduce
        // the data size, so 16-bit RGB is another option.
        nn::y2r::SetOutputFormat(nn::y2r::OUTPUT_RGB_24);
        //nn::y2r::SetOutputFormat( nn::y2r::OUTPUT_RGB_16_565 );

        // Set the output data rotation. No rotation is necessary here.
        nn::y2r::SetRotation(nn::y2r::ROTATION_NONE);

        // Set the output data ordering.
        // Since we want to use the converted image directly as a texture, select
        // the 8x8 block format, which is the ordering that DMP_NATIVE expects.
        // If an image is not going to be rendered, image processing is sometimes
        // easier if you select line format.
        nn::y2r::SetBlockAlignment(nn::y2r::BLOCK_8_BY_8);
        //nn::y2r::SetBlockAlignment( nn::y2r::BLOCK_LINE );

        // Configure whether to receive a notification when Y2R conversion completes.
        nn::y2r::SetTransferEndInterrupt(true);
        nn::y2r::GetTransferEndEvent(&s_Y2rEndEvent);

        // Set the size of the input image for Y2R conversion.
        nn::y2r::SetInputLineWidth(s_TrimmingWidth);
        nn::y2r::SetInputLines(s_TrimmingHeight);

        // Set the conversion coefficients.
        // The camera module installed in CTR systems may change in the future.
        // For this reason, instead of hard-coding specific conversion
        // coefficients, we recommend using GetSuitableY2rStandardCoefficient to
        // get the coefficient that matches the camera and setting that value.
        // (This demo sets the value already obtained during camera initialization.)
        nn::y2r::SetStandardCoefficient(s_Coefficient);

        // Set the alpha value for 16-bit or 32-bit RGB output formats.
        // (This demo does not use such a format, so this does not need to be set.)
        nn::y2r::SetAlpha(0xFF);
    } //InitializeY2r()

//==============================================================================
/* Please see man pages for details */
    void InitializeResource(void)
    {
        // Allocate buffers for transferring camera image data.
        s_YuvBufferSize = nn::camera::GetFrameBytes(s_TrimmingWidth, s_TrimmingHeight);
        for (s32 i = 0; i < CAMERA_NUM; i++)
        {
            for (s32 j = 0; j < YUV_BUFFER_NUM; j++)
            {
                s_paaYuvBuffer[i][j] = static_cast<u8*>(s_AppHeap.Allocate(s_YuvBufferSize, 64));
                memset(s_paaYuvBuffer[i][j], 0, s_YuvBufferSize);
            }
            s_aYuvCapturing[i]      = 0;
            s_aYuvLatestCaptured[i] = YUV_BUFFER_NUM - 1;
            s_aYuvReading[i]        = YUV_BUFFER_NUM - 1;
            s_aCsYuvSwap[i].Initialize();
        }

        // In this sample demo, the main thread handles Y2R conversion.
        // Initialize the queue used to wait for conversion requests.
        s_Y2rRequestQueue.Initialize(s_aY2rRequestQueueMessage, Y2R_REQUEST_QUEUE_LENGTH);

        // Allocate buffers for storing the results of Y2R conversion of the
        // left and right camera images.
        s_RgbBufferSize = nn::y2r::GetOutputImageSize(
            s_TextureWidth, s_TextureHeight, nn::y2r::OUTPUT_RGB_24);
        for (s32 i = 0; i < CAMERA_NUM; i++)
        {
            NN_ASSERT(!s_paRgbBuffer[i]);
            s_paRgbBuffer[i] = static_cast<u8*>(s_AppHeap.Allocate(s_RgbBufferSize, 64));
            memset(s_paRgbBuffer[i], 0, s_RgbBufferSize);
        }
    } //InitializeResource()
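
    // For reference, the buffer sizes allocated above work out as follows
    // (derived from the constants in this file): a YUV4:2:2 512x384 frame is
    // 512 * 384 * 2 = 393,216 bytes per buffer (x3 buffers x2 cameras), and
    // the 24-bit RGB output for a 512x512 texture is 512 * 512 * 3 = 786,432
    // bytes per camera.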

//==============================================================================
/* Please see man pages for details */
    void FinalizeResource(void)
    {
        for (int i = 0; i < CAMERA_NUM; i++)
        {
            // Free the allocated buffers.
            for (s32 j = 0; j < YUV_BUFFER_NUM; j++)
            {
                if (s_paaYuvBuffer[i][j])
                {
                    s_AppHeap.Free(s_paaYuvBuffer[i][j]);
                    s_paaYuvBuffer[i][j] = NULL;
                }
            }
            if (s_paRgbBuffer[i])
            {
                s_AppHeap.Free(s_paRgbBuffer[i]);
                s_paRgbBuffer[i] = NULL;
            }

            // Also destroy the events obtained from the camera library.
            s_aCameraRecvEvent[i].Finalize();
            s_aCameraBufferErrorEvent[i].Finalize();
            s_aCameraVsyncEvent[i].Finalize();

            s_aCsYuvSwap[i].Finalize();
        }
        s_Y2rRequestQueue.Finalize();
    } //FinalizeResource()

//==============================================================================
/* Please see man pages for details */
    void CameraThreadFunc(uptr param NN_IS_UNUSED_VAR)
    {
        // Camera initialization
        SetupCamera();

        // Exit if a request to finalize the thread arrived during camera initialization.
        if (s_IsCameraThreadEnd)
        {
            return;
        }

        enum
        {
            EVENT_RECV_R,
            EVENT_RECV_L,
            EVENT_ERROR_R,
            EVENT_ERROR_L,
            EVENT_VSYNC_R,
            EVENT_VSYNC_L,
            EVENT_MAX
        };
        nn::os::WaitObject* pa_WaitObject[EVENT_MAX];

        // If the cameras were activated before the thread was started, the Vsync
        // event may already have been signaled, so clear the signal.
        s_aCameraVsyncEvent[CAMERA_RIGHT].ClearSignal();
        s_aCameraVsyncEvent[CAMERA_LEFT].ClearSignal();

        // We signal a buffer error event when first starting, because startup is
        // processed just like a recovery from a buffer error.
        s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();

        while (1)
        {
            // The SetReceiving function replaces the event that signals transfer
            // completion, so the wait objects are reassigned on every iteration.
            // This way we never end up waiting on a stale (nonexistent) event.
            pa_WaitObject[EVENT_RECV_R]  = &s_aCameraRecvEvent[CAMERA_RIGHT];
            pa_WaitObject[EVENT_RECV_L]  = &s_aCameraRecvEvent[CAMERA_LEFT];
            pa_WaitObject[EVENT_ERROR_R] = &s_aCameraBufferErrorEvent[CAMERA_RIGHT];
            pa_WaitObject[EVENT_ERROR_L] = &s_aCameraBufferErrorEvent[CAMERA_LEFT];
            pa_WaitObject[EVENT_VSYNC_R] = &s_aCameraVsyncEvent[CAMERA_RIGHT];
            pa_WaitObject[EVENT_VSYNC_L] = &s_aCameraVsyncEvent[CAMERA_LEFT];

            s32 num = nn::os::WaitObject::WaitAny(pa_WaitObject, EVENT_MAX);

            // Sleep
            if (s_IsCameraThreadSleep)
            {
                s_IsCameraThreadSleep = false;

                // Stop camera capture
                StopCameraCapture();

                // Clear the pending Y2R conversion requests before going to sleep.
                uptr msg;
                while (s_Y2rRequestQueue.TryDequeue(&msg))
                {
                }

                // Notify that the camera thread has entered the wait state (to Sleep, Home)
                s_CameraThreadSleepAckEvent.Signal();
                // Wait until the thread recovery signal arrives (from Sleep, Home)
                s_CameraThreadAwakeEvent.Wait();

                // Clear V-Blank notifications that were signaled while asleep.
                s_aCameraVsyncEvent[CAMERA_RIGHT].ClearSignal();
                s_aCameraVsyncEvent[CAMERA_LEFT].ClearSignal();
                s_VsyncTimingDifference = 0;

                // Camera Vsync synchronization is required after returning from sleep.
                s_IsNeedSynchronizeVsyncTiming = true;

                // Use the buffer error mechanism to resume capture.
                s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();
            }

            // End the thread
            if (s_IsCameraThreadEnd)
            {
                StopCameraCapture();
                break;
            }

            switch (num)
            {
            case EVENT_RECV_R:
            case EVENT_RECV_L:
                {
                    // Reconfigure transfers
                    CameraRecvFunc((num == EVENT_RECV_R) ? nn::camera::PORT_CAM1 : nn::camera::PORT_CAM2);
                }
                break;
            case EVENT_ERROR_R:
            case EVENT_ERROR_L:
                {
                    s_aCameraBufferErrorEvent[CAMERA_RIGHT].ClearSignal();
                    s_aCameraBufferErrorEvent[CAMERA_LEFT].ClearSignal();
                    if (!UpdateCamera())
                    {
                        // Failed because the system is closed.
                        // Signal a buffer error event so that UpdateCamera runs
                        // again after the system has been opened.
                        s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();
                        if (!s_IsCameraThreadSleep)
                        {
                            // Even if there is no request to enter the wait
                            // state, sleep for a period of time to create a
                            // retry interval.
                            nn::os::Thread::Sleep(nn::fnd::TimeSpan::FromMilliSeconds(10));
                        }
                    }
                }
                break;
            case EVENT_VSYNC_R:
            case EVENT_VSYNC_L:
                {
                    CameraVsyncFunc((num == EVENT_VSYNC_R) ? nn::camera::PORT_CAM1 : nn::camera::PORT_CAM2);
                }
                break;
            default:
                {
                    debug_print("Illegal event\n");
                }
                break;
            } //switch
        } //while

        {
            // Finalize CAMERA/Y2R.
            // If finalization does not follow the procedure below, audible noise
            // may be produced in the HOME Menu.
            nn::y2r::StopConversion();                       // (1) Stop Y2R conversion
            nn::camera::StopCapture(nn::camera::PORT_BOTH);  // (2) Stop capture
            nn::camera::Activate(nn::camera::SELECT_NONE);   // (3) Put all cameras into standby
            nn::camera::Finalize();                          // (4) Finalize the camera library
            nn::y2r::Finalize();                             // Finalize Y2R
        }

        {
            // Free the resources used for CAMERA/Y2R
            FinalizeResource();
        }
    } //CameraThreadFunc()
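
    // Summary of the camera thread's event loop above (added for clarity):
    // WaitAny() wakes on any of six events. RECV_R/RECV_L mean a frame transfer
    // finished (queue a Y2R request and re-arm the transfer), ERROR_R/ERROR_L
    // mean a FIFO overflow or an explicit "reconfigure" request from the main
    // thread (stop and restart capture via UpdateCamera), and VSYNC_R/VSYNC_L
    // drive frame counting and frame-rate measurement.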

//==============================================================================
/* Please see man pages for details */
    void CameraRecvFunc(nn::camera::Port port)
    {
        s32 index = (port == nn::camera::PORT_CAM1) ? 0 : 1;

        // For the first four frames after the cameras are activated, the images
        // we get may be extremely dark. Including those four frames, it takes
        // around 14 frames indoors and around 30 frames outdoors for
        // auto-exposure to stabilize.
        // Here we wait 30 frames before starting Y2R conversion.
        if (s_CameraFrameCount >= FRAME_NUM_FOR_STABILIZE /* == 30 */)
        {
            // Switch the write buffer.
            {
                // Lock so that this does not overlap with switching the read buffer.
                nn::os::CriticalSection::ScopedLock sl(s_aCsYuvSwap[index]);
                s_aYuvLatestCaptured[index] = s_aYuvCapturing[index];
                do
                {
                    if (++s_aYuvCapturing[index] >= YUV_BUFFER_NUM)
                    {
                        s_aYuvCapturing[index] = 0;
                    }
                } while (s_aYuvCapturing[index] == s_aYuvReading[index]);
            }

            // Make a Y2R conversion request.
            {
                Y2rRequest_st* p_Req = &(saa_Y2rRequestBuffer[index][s_aY2rRequestBufferCounter[index]]);
                p_Req->index = index;
                p_Req->time  = nn::os::Tick::GetSystemCurrent();

                // Put the request in the queue
                if (!s_Y2rRequestQueue.TryEnqueue(reinterpret_cast<uptr>(p_Req)))
                {
                    debug_print("Request port=%d: Queue is full.\n", index);
                }

                // Advance the circular buffer for next time
                if (++s_aY2rRequestBufferCounter[index] >= Y2R_REQUEST_BUFFER_MAX)
                {
                    s_aY2rRequestBufferCounter[index] = 0;
                }
            }
        }

        // Configure the next frame's transfer.
        nn::camera::SetReceiving(
            &s_aCameraRecvEvent[index],
            s_paaYuvBuffer[index][s_aYuvCapturing[index]],
            port,
            s_YuvBufferSize,
            s_YuvTransferUnit);
    } //CameraRecvFunc()

//==============================================================================
/* Please see man pages for details */
    void CameraVsyncFunc(nn::camera::Port port)
    {
        s32 index = (port == nn::camera::PORT_CAM1) ? CAMERA_RIGHT : CAMERA_LEFT;

        // Get the timing at which the V-Blank occurred.
        nn::camera::GetLatestVsyncTiming(saa_LatestVsyncTiming[index], port, 2);

        // To determine whether auto-exposure has stabilized, count up to 30 camera frames.
        if ((index == CAMERA_LEFT) && (s_CameraFrameCount < FRAME_NUM_FOR_STABILIZE /* == 30 */))
        {
            s_CameraFrameCount++;
        }

        // Find the difference in V-Blank timing between the left and right cameras.
        if (index == CAMERA_LEFT)
        {
            s_VsyncTimingDifference =
                saa_LatestVsyncTiming[CAMERA_LEFT][0].GetMicroSeconds()
              - saa_LatestVsyncTiming[CAMERA_RIGHT][0].GetMicroSeconds();
        }

        // Record the V-Blank interval in order to calculate the frame rate.
        saa_VsyncInterval[index][s_aVsyncIntervalPos[index]] =
            saa_LatestVsyncTiming[index][0].GetMicroSeconds()
          - saa_LatestVsyncTiming[index][1].GetMicroSeconds();
        if (++s_aVsyncIntervalPos[index] >= VSYNC_INTERVAL_LOG_NUM)
        {
            s_aVsyncIntervalPos[index] = 0;
        }

        // Calculate the frame rate as the average over the last four frames.
        s64 sum = 0;
        s32 i = 0;
        for (i = 0; i < VSYNC_INTERVAL_LOG_NUM; i++)
        {
            if (saa_VsyncInterval[index][i] <= 0)
            {
                break;
            }
            sum += saa_VsyncInterval[index][i];
        }
        if (sum != 0)
        {
            s_aFps[index] = static_cast<s64>(i) * 1000000000LL / sum;
        }
    } //CameraVsyncFunc()
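
    // The frame-rate math above keeps three decimal places in an integer
    // (added for clarity): with i frames spanning "sum" microseconds,
    // fps = i * 1,000,000 / sum, and the extra factor of 1000 (hence the
    // 1,000,000,000 multiplier) stores "fps * 1000" in s_aFps. For example,
    // 4 frames over ~266,666 usec gives 4 * 10^9 / 266666 = 15000, which
    // DrawDisplay1 renders as "15.000 fps".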

//==============================================================================
/* Please see man pages for details */
    void StopCameraCapture(void)
    {
        // Do nothing if StartCapture has not been called.
        if (!s_IsCameraCaptureStarted)
        {
            return;
        }

        // Stop capture operations.
        nn::camera::StopCapture(nn::camera::PORT_BOTH);

        // Capture actually stops at the end of the frame, which is not
        // necessarily immediately after the call to StopCapture, so wait for
        // capture to end.
        // A locking mechanism would be necessary if StartCapture could run from
        // another thread, because IsBusy would then wait indefinitely.
        s32 cnt = 0;
        while (nn::camera::IsBusy(nn::camera::PORT_CAM1)
            || nn::camera::IsBusy(nn::camera::PORT_CAM2))
        {
            nn::os::Thread::Sleep(nn::fnd::TimeSpan::FromMilliSeconds(1));

            // Depending on when the cameras are stopped, if the system was
            // closed while IsBusy was being polled, IsBusy may continue to
            // return true until the system is opened again. To avoid an
            // infinite loop, use a timeout.
            // Normally IsBusy is true for at most one frame, so the timeout is
            // set to one frame (66 counts of the 1-millisecond sleep above).
            if (++cnt > 66)
            {
                NN_LOG("Busy timeout\n");
                break;
            }
        }
        nn::camera::ClearBuffer(nn::camera::PORT_BOTH);

        s_IsCameraCaptureStarted = false;
    } //StopCameraCapture()

//==============================================================================
/* Please see man pages for details */
    void RestartCameraCapture(void)
    {
        // Stop capturing images on both ports.
        // Even if one of the ports has already stopped, capture is stopped on
        // both ports for simplicity.
        StopCameraCapture();

        // Reconfigure transfers.
        nn::camera::SetReceiving(
            &s_aCameraRecvEvent[CAMERA_RIGHT],
            s_paaYuvBuffer[CAMERA_RIGHT][s_aYuvCapturing[CAMERA_RIGHT]],
            nn::camera::PORT_CAM1,
            s_YuvBufferSize,
            s_YuvTransferUnit);
        nn::camera::SetReceiving(
            &s_aCameraRecvEvent[CAMERA_LEFT],
            s_paaYuvBuffer[CAMERA_LEFT][s_aYuvCapturing[CAMERA_LEFT]],
            nn::camera::PORT_CAM2,
            s_YuvBufferSize,
            s_YuvTransferUnit);

        // No locking mechanism is necessary because StartCapture and StopCapture
        // are called only from the camera thread.
        nn::camera::StartCapture(nn::camera::PORT_BOTH);
        s_IsCameraCaptureStarted = true;
    } //RestartCameraCapture()

//==============================================================================
/* Please see man pages for details */
    bool Y2rConversion(s32 index)
    {
        // Switch the camera read buffer.
        {
            nn::os::CriticalSection::ScopedLock sl(s_aCsYuvSwap[index]);
            s_aYuvReading[index] = s_aYuvLatestCaptured[index];
        }

        // *** BEGIN WARNING ***
        // Due to a hardware bug in Y2R, when the camera and Y2R are used at the
        // same time, recovery from a camera buffer error can cause Y2R transfers
        // to hang, depending on the timing of that recovery.
        // In that case, the conversion completion event obtained by
        // nn::y2r::GetTransferEndEvent might never be signaled.
        // For details on when this problem occurs and how to deal with it, see
        // the Function Reference Manual for the Y2R library.
        // In this sample, a timeout is applied to the wait on that event, and a
        // retry is performed when the timeout expires.
        // *** END WARNING ***

        // Add an offset so the 512x384 image lands in the middle of the 512x512 buffer.
        s32 offset = nn::y2r::GetOutputImageSize(
            s_TextureWidth,
            (s_TextureHeight - s_TrimmingHeight) / 2,
            nn::y2r::OUTPUT_RGB_24);
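
        // Worked example of the offset above (added for clarity): the 512x384
        // image is centered vertically in the 512x512 texture, leaving
        // (512 - 384) / 2 = 64 blank rows at the top. In 24-bit RGB that is
        // 512 * 64 * 3 = 98,304 bytes, which is what GetOutputImageSize should
        // return for a 512x64 region.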

        s32 cnt = 0;
        while (true)
        {
            // Configure the transfer of Y2R output images.
            nn::y2r::CTR::SetReceiving(
                s_paRgbBuffer[index] + offset,
                nn::y2r::GetOutputImageSize(s_TrimmingWidth, s_TrimmingHeight, nn::y2r::OUTPUT_RGB_24),
                nn::y2r::GetOutputBlockSize(s_TrimmingWidth, nn::y2r::OUTPUT_RGB_24));

            // Configure the transfer of Y2R input images.
            // Transfers are measured in lines of data.
            nn::y2r::CTR::SetSendingYuv(
                s_paaYuvBuffer[index][s_aYuvReading[index]],
                nn::camera::GetFrameBytes(s_TrimmingWidth, s_TrimmingHeight),
                nn::camera::GetLineBytes(s_TrimmingWidth));

            // Start Y2R conversion.
            nn::y2r::CTR::StartConversion();

            // Wait for Y2R conversion to complete, because the data transfer is
            // aborted if the transfer for the next conversion is configured
            // before conversion of the current image has completed.
            // We use a timeout to deal with the bug that causes Y2R to hang.
            // Conversion takes roughly 10 ms for a 512x384 image output in
            // 24-bit RGB format, so the timeout is set to twice that (20 ms).
            // The size of the camera image and the format of the output image
            // affect how long the conversion takes.
            if (s_Y2rEndEvent.Wait(nn::fnd::TimeSpan::FromMilliSeconds(20)))
            {
                // Conversion succeeded
                return true;
            }
            else
            {
                // Conversion failed
                NN_LOG("Y2R may have hung up.\n");

                // Force conversion to stop
                nn::y2r::StopConversion();

                // It is very rare for the problem to occur repeatedly, but the
                // probability is not zero, so to play it safe we explicitly exit
                // the loop, give up on converting this image, and move on to the
                // next one.
                if (++cnt >= 2)
                {
                    // Give up on conversion
                    return false;
                }
                // Retry
            }
        } //while(true)
    } //Y2rConversion()
} //Namespace

//=============================================================================
/* Please see man pages for details */
namespace
{
    //------------------------------------------------------------
    // Preparation before sleep/transition (called from the main thread)
    //------------------------------------------------------------
    void PrepareTransitionCallback(void)
    {
        // If the camera thread has not been finalized, transition it to the wait state.
        if (!s_IsCameraThreadEnd)
        {
            // Notify the camera thread to transition to the wait state (to CameraThreadFunc).
            s_IsCameraThreadSleep = true;
            // Signal so that the camera thread does not block in WaitAny (to CameraThreadFunc).
            s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();
            // Wait for the camera thread to transition to the wait state (from CameraThreadFunc).
            s_CameraThreadSleepAckEvent.Wait();
        }
    }

    //------------------------------------------------------------
    // Recovery processing after sleep/transition (called from the main thread)
    //------------------------------------------------------------
    void AfterTransitionCallback(void)
    {
        // If the camera thread has not been finalized, cancel the wait state.
        // If recovering in order to finalize the application, the camera thread
        // is finalized without canceling the wait state here.
        if (!s_IsCameraThreadEnd && !TransitionHandler::IsExitRequired())
        {
            // Signal the camera thread to start again (to CameraThreadFunc).
            s_CameraThreadAwakeEvent.Signal();
        }
    }

    //-----------------------------------------------------
    // Start the camera thread.
    //-----------------------------------------------------
    void InitializeCameraThread(void)
    {
        s_CameraThreadSleepAckEvent.Initialize(false);
        s_CameraThreadAwakeEvent.Initialize(false);
        s_aCameraRecvEvent[CAMERA_RIGHT].Initialize(false);
        s_aCameraRecvEvent[CAMERA_LEFT].Initialize(false);
        // This event is used when recovering from the wait state, so initialize it first.
        s_aCameraBufferErrorEvent[CAMERA_RIGHT].Initialize(false);

        TransitionHandler::SetPrepareSleepCallback(PrepareTransitionCallback);
        TransitionHandler::SetAfterSleepCallback(AfterTransitionCallback);
        TransitionHandler::SetPrepareHomeButtonCallback(PrepareTransitionCallback);
        TransitionHandler::SetAfterHomeButtonCallback(AfterTransitionCallback);

        // Remember the priority of the main thread so the camera thread's
        // priority can be changed relative to it later.
        s_MainThreadPriority = nn::os::Thread::GetCurrentPriority();

        // Create the thread for camera processing.
        // While initializing, its priority is set lower than the main thread's.
        // After initialization completes, the priority is raised above the main thread's.
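        // (Note: as the comments above and in SetupCamera indicate, a smaller
        // priority value means a higher priority, so "+ 3" below starts the
        // camera thread below the main thread, and the "- 3" change in
        // SetupCamera later raises it above the main thread.)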
        s_CameraThread.StartUsingAutoStack(
            CameraThreadFunc,
            NULL,
            8192,
            s_MainThreadPriority + 3);
    }

    //-----------------------------------------------------
    // Finalization of the camera thread
    //-----------------------------------------------------
    void FinalizeCameraThread(void)
    {
        // Destroy the thread for camera processing
        s_IsCameraThreadEnd = true;                        // Set the end flag
        s_CameraThreadAwakeEvent.Signal();                 // Signal so the thread does not stay stopped in the sleep state
        s_aCameraBufferErrorEvent[CAMERA_RIGHT].Signal();  // Signal so the camera thread does not stay blocked in WaitAny
        s_CameraThread.Join();                             // Wait for the thread to end
        s_CameraThread.Finalize();                         // Discard the thread

        TransitionHandler::SetPrepareSleepCallback(NULL);
        TransitionHandler::SetAfterSleepCallback(NULL);
        TransitionHandler::SetPrepareHomeButtonCallback(NULL);
        TransitionHandler::SetAfterHomeButtonCallback(NULL);

        s_CameraThreadSleepAckEvent.Finalize();
        s_CameraThreadAwakeEvent.Finalize();
    }
} //Namespace

//=============================================================================
/* Please see man pages for details */
namespace
{
    //=============================================================================
    void InitializeApplet(void)
    {
        TransitionHandler::Initialize();
        TransitionHandler::EnableSleep();
    } //InitializeApplet()

    //=============================================================================
    void FinalizeApplet(void)
    {
        TransitionHandler::DisableSleep();
        TransitionHandler::Finalize();
    } //FinalizeApplet()
} //Namespace

//=============================================================================
/* Please see man pages for details */
namespace
{
    //=============================================================================
    int InitializeGx(void)
    {
        nn::fs::Initialize();

        NN_ASSERT(!s_AddrForGxHeap);
        s_AddrForGxHeap = reinterpret_cast<uptr>(s_AppHeap.Allocate(MEMORY_SIZE_FCRAM_GX));
        s_RenderSystem.Initialize(s_AddrForGxHeap, MEMORY_SIZE_FCRAM_GX);

        s_ProgramID = glCreateProgram();
        s_ShaderID  = glCreateShader(GL_VERTEX_SHADER);

        nn::fs::FileReader file(L"rom:/shader.shbin");
        size_t fileSize = file.GetSize();
        void* buf = s_AppHeap.Allocate(fileSize);
        s32 read = file.Read(buf, fileSize);
        glShaderBinary(1, &s_ShaderID, GL_PLATFORM_BINARY_DMP, buf, read);
        file.Finalize();
        s_AppHeap.Free(buf);

        glAttachShader(s_ProgramID, s_ShaderID);
        glAttachShader(s_ProgramID, GL_DMP_FRAGMENT_SHADER_DMP);

        glBindAttribLocation(s_ProgramID, 0, "aPosition");
        glBindAttribLocation(s_ProgramID, 1, "aTexCoord");

        glLinkProgram(s_ProgramID);
        glValidateProgram(s_ProgramID);
        glUseProgram(s_ProgramID);

        glGenTextures(CAMERA_NUM, s_Texture);
        glBindTexture(GL_TEXTURE_2D, s_Texture[CAMERA_RIGHT]);
        glBindTexture(GL_TEXTURE_2D, s_Texture[CAMERA_LEFT]);
        glBindTexture(GL_TEXTURE_2D, 0);

        s_RenderSystem.SetClearColor(NN_GX_DISPLAY0,     0.36f, 0.42f, 0.5f, 1.0f);
        s_RenderSystem.SetClearColor(NN_GX_DISPLAY1,     0.0f,  0.0f,  0.0f, 1.0f);
        s_RenderSystem.SetClearColor(NN_GX_DISPLAY0_EXT, 0.36f, 0.42f, 0.5f, 1.0f);

        glClearDepthf(1.f);
        glEnable(GL_CULL_FACE);
        glFrontFace(GL_CCW);
        glCullFace(GL_BACK);

        LoadObjects();

        s_RenderSystem.SetLcdMode(NN_GX_DISPLAYMODE_STEREO);

        return 0;
    }

    //==============================================================================
    void FinalizeGx(void)
    {
        DeleteObjects();
        glDeleteTextures(CAMERA_NUM, s_Texture);
        glDetachShader(s_ProgramID, GL_DMP_FRAGMENT_SHADER_DMP);
        glDetachShader(s_ProgramID, s_ShaderID);
        glDeleteShader(s_ShaderID);
        glDeleteProgram(s_ProgramID);
        s_RenderSystem.Finalize();
        if (s_AddrForGxHeap)
        {
            s_AppHeap.Free(reinterpret_cast<void*>(s_AddrForGxHeap));
            s_AddrForGxHeap = 0;
        }
    }

    //=============================================================================
    void LoadObjects(void)
    {
        // Object used to paste a 512x512 camera image as a texture.
        // The object size (2.048) divides evenly by 512, which makes it easier
        // to calculate the camera position for displaying images pixel-by-pixel
        // and to calculate how far to translate the image to correct for
        // position offsets in the stereo cameras.
        GLfloat coords[] =
        {
            -1.024f,  1.024f, 0.f, 1.f,
            -1.024f, -1.024f, 0.f, 1.f,
             1.024f,  1.024f, 0.f, 1.f,
             1.024f, -1.024f, 0.f, 1.f
        };
        GLfloat texcoords[] =
        {
            0.f, 1.f, 0.f,
            0.f, 0.f, 0.f,
            1.f, 1.f, 0.f,
            1.f, 0.f, 0.f,
        };
        GLushort idxs[] = { 0, 1, 2, 3 };

        glGenBuffers(1, &s_ArrayBufferID);
        glBindBuffer(GL_ARRAY_BUFFER, s_ArrayBufferID);
        glBufferData(GL_ARRAY_BUFFER, sizeof(coords) + sizeof(texcoords), 0, GL_STATIC_DRAW);
        glBufferSubData(GL_ARRAY_BUFFER, 0, sizeof(coords), coords);
        glBufferSubData(GL_ARRAY_BUFFER, sizeof(coords), sizeof(texcoords), texcoords);

        glGenBuffers(1, &s_ElementArrayBufferID);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s_ElementArrayBufferID);
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, sizeof(idxs), idxs, GL_STATIC_DRAW);

        glEnableVertexAttribArray(0);
        glEnableVertexAttribArray(1);
        glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, reinterpret_cast<GLvoid*>(sizeof(coords)));
    }
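
    // Layout of the vertex buffer created above (added for clarity): the four
    // position vectors (4 components x 4 vertices = 16 floats, 64 bytes) are
    // stored first, followed by the texture coordinates at byte offset
    // sizeof(coords) = 64. This is why ReadyObjects() below rebinds attribute 1
    // at reinterpret_cast<GLvoid*>(16 * sizeof(GLfloat)).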

    //==============================================================================
    void DeleteObjects(void)
    {
        glDeleteBuffers(1, &s_ElementArrayBufferID);
        glDeleteBuffers(1, &s_ArrayBufferID);
    }

    //=============================================================================
    void ReadyObjects(void)
    {
        glUseProgram(s_ProgramID);
        glBindBuffer(GL_ARRAY_BUFFER, s_ArrayBufferID);
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, s_ElementArrayBufferID);
        glEnableVertexAttribArray(0);
        glEnableVertexAttribArray(1);
        glVertexAttribPointer(0, 4, GL_FLOAT, GL_FALSE, 0, 0);
        glVertexAttribPointer(1, 3, GL_FLOAT, GL_FALSE, 0, reinterpret_cast<GLvoid*>(16 * sizeof(GLfloat)));
    }

    //=============================================================================
    void SetTextureCombiner(void)
    {
        glUniform4f(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].constRgba"),
                    0.0f, 0.0f, 0.0f, 1.0f);
        glUniform3i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].srcRgb"),
                    GL_TEXTURE0, GL_PREVIOUS, GL_PREVIOUS);
        glUniform3i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].srcAlpha"),
                    GL_CONSTANT, GL_PREVIOUS, GL_PREVIOUS);
        glUniform3i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].operandRgb"),
                    GL_SRC_COLOR, GL_SRC_COLOR, GL_SRC_COLOR);
        glUniform3i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].operandAlpha"),
                    GL_SRC_ALPHA, GL_SRC_ALPHA, GL_SRC_ALPHA);
        glUniform1i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].combineRgb"),   GL_REPLACE);
        glUniform1i(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].combineAlpha"), GL_REPLACE);
        glUniform1f(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].scaleRgb"),   1.0f);
        glUniform1f(glGetUniformLocation(s_ProgramID, "dmp_TexEnv[2].scaleAlpha"), 1.0f);
    }

    //=============================================================================
    void DrawFrame(void)
    {
        glClearColor(0.0f, 0.0f, 0.0f, 1.0f);

        // Render the upper screen (stereoscopic display)
        DrawDisplay0Stereo();
        // Render the lower screen
        DrawDisplay1();

        s_FrameCount++;
    } //DrawFrame

    //=============================================================================
    void DrawDisplay0Stereo(void)
    {
        // Render the image for the left eye
        DrawDisplay0();
        // Render the image for the right eye
        DrawDisplay0Ext();

        s_RenderSystem.SwapBuffers();
    } //DrawDisplay0Stereo

    //=============================================================================
    void DrawDisplay0(void)
    {
        s_RenderSystem.SetRenderTarget(NN_GX_DISPLAY0);
        s_RenderSystem.Clear();
        glViewport(0, 0, nn::gx::DISPLAY0_WIDTH, nn::gx::DISPLAY0_HEIGHT);

        // Skip rendering camera images until the cameras have been initialized.
        if (s_IsFinishCameraSetting)
        {
            // Render the left camera image (SELECT_OUT2)
            DrawCameraImage(CAMERA_LEFT);
        }

        s_RenderSystem.Transfer();
    } //DrawDisplay0

    //=============================================================================
    void DrawDisplay0Ext(void)
    {
        s_RenderSystem.SetRenderTarget(NN_GX_DISPLAY0_EXT);
        s_RenderSystem.Clear();
        glViewport(0, 0, nn::gx::DISPLAY0_WIDTH, nn::gx::DISPLAY0_HEIGHT);

        // Skip rendering camera images until the cameras have been initialized.
        if (s_IsFinishCameraSetting)
        {
            // Render the right camera image (SELECT_OUT1)
            DrawCameraImage(CAMERA_RIGHT);
        }

        s_RenderSystem.Transfer();
    } //DrawDisplay0Ext

    //=============================================================================
    void DrawDisplay1(void)
    {
        s_RenderSystem.SetRenderTarget(NN_GX_DISPLAY1);
        s_RenderSystem.Clear();

        s_RenderSystem.SetColor(1.0f, 1.0f, 1.0f);
        s_RenderSystem.SetFontSize(8.0f);

        s_RenderSystem.DrawText(8.0f,  8.0f, "Frame: %d", s_FrameCount);
        s_RenderSystem.DrawText(8.0f, 24.0f, "Left  : %2d.%03d fps",
            static_cast<s32>(s_aFps[CAMERA_LEFT] / 1000),
            static_cast<s32>(s_aFps[CAMERA_LEFT] % 1000));
        s_RenderSystem.DrawText(8.0f, 32.0f, "Right : %2d.%03d fps",
            static_cast<s32>(s_aFps[CAMERA_RIGHT] / 1000),
            static_cast<s32>(s_aFps[CAMERA_RIGHT] % 1000));
        s_RenderSystem.DrawText(8.0f, 48.0f, "Vsync timing diff: %lld usec",
            s_VsyncTimingDifference);
        s_RenderSystem.DrawText(8.0f, 64.0f, "Camera capture: %s",
            (s_IsCameraCaptureEnable && s_IsCameraActiveTarget) ? "Enabled" : "Disabled");
        s_RenderSystem.DrawText(8.0f, 72.0f, "Camera device: %s",
            (s_IsCameraActiveTarget) ? "Active" : "Inactive");
        if (s_CameraFrameCount < FRAME_NUM_FOR_STABILIZE)
        {
            s_RenderSystem.DrawText(8.0f, 88.0f, "Waiting for auto exposure stabilization");
        }
        s_RenderSystem.DrawText(8.0f, 216.0f, "Y button     : Stop/Restart capture");
        s_RenderSystem.DrawText(8.0f, 224.0f, "X button     : Deactivate/Activate camera");
        s_RenderSystem.DrawText(8.0f, 232.0f, "Start button : Finalize");

        s_RenderSystem.Transfer();
        s_RenderSystem.SwapBuffers();
    } //DrawDisplay1()

    //==============================================================================
    void SetCalibration(void)
    {
        // Calculate the unit of translation used for correction.
        // The object size is 2.048 x 2.048, and a 512x384 region trimmed from
        // the VGA image is applied to it as a texture, so moving the image by
        // exactly 1 pixel means moving it by 2.048 / 512 = 0.004.
        // With this setting, even if the camera image size is changed from VGA
        // to 512x384, the texture size does not change, because the VGA image is
        // trimmed. However, the image would then be reduced internally by the
        // camera, making the subject smaller compared to the VGA image, so the
        // translation used for correction must shrink by the same factor.
        // In that case, the unit of translation becomes 0.004 * 512 / 640 = 0.0032.
        f32 step = 0.004f * static_cast<f32>(s_OriginalWidth) / 640.0f;

        // Get calibration data.
        nn::camera::GetStereoCameraCalibrationData(&s_CalData);

        // Calculate the parallax.
        // The horizontal parallax determines the apparent depth of the subject
        // (how much the subject appears to jump out of or sink into the screen).
        // To adjust the apparent depth of a camera subject, move the left and/or
        // right camera images horizontally. (Because the physical camera
        // positions are fixed, this operation does not change the solidity of
        // the subject.)
        // Here we find the parallax at a distance of 1 m, so that a subject at
        // that distance appears to sit directly on the surface of the LCD.
        f32 parallax = nn::camera::GetParallax(s_CalData, 1.0f);

        // Get the calibration matrices.
        // The size of the (stereoscopic) region over which the left and right
        // images overlap differs from system to system.
        // This function first finds the region over which the left and right
        // images overlap, and then finds a rectangular region within it whose
        // sides have a ratio of 400:240. If the rectangular region is smaller or
        // larger than 400x240, the correction matrix is scaled so that the
        // cropped rectangular region can be displayed as 400x240.
        // Because the correction matrix is scaled, the image cannot actually be
        // displayed pixel-by-pixel, even though the screen display was set up
        // for pixel-by-pixel rendering above.
        // If the 9th argument is set to a value larger than 400 (for example,
        // 480), the correction matrix is scaled such that the obtained
        // rectangular region is wider than the screen (400), so the edges of the
        // image extend past the screen. This allows the parallax to be adjusted
        // even further. (This could be used to let the user adjust the parallax
        // by hand, for example.)
        // By specifying 0.0f as the sixth argument, you get a matrix that
        // projects subjects an infinite distance away onto the surface of the
        // screen. By specifying the return value of the GetParallaxOnChart
        // function, you get a matrix that projects subjects 250 mm away onto the
        // surface of the screen.
        nn::camera::GetStereoCameraCalibrationMatrixEx(
            &s_aCalMatrix[CAMERA_RIGHT],
            &s_aCalMatrix[CAMERA_LEFT],
            &s_CalScale,
            s_CalData,
            step,
            parallax,
            s_TrimmingWidth,
            s_TrimmingHeight,
            400,
            240);
    }

//=============================================================================
/* Please see man pages for details */
    void DrawCameraImage(s32 index)
    {
        if (!s_ExistRgbImage[index])
        {
            // Do not render if there is no valid RGB image.
            return;
        }

        glBindTexture(GL_TEXTURE_2D, s_Texture[index]);

        // Update the texture immediately after Y2R conversion
        if (s_IsUpdateRgbImage[index])
        {
            glTexImage2D(
                GL_TEXTURE_2D | NN_GX_MEM_FCRAM | GL_NO_COPY_FCRAM_DMP,
                0,
                GL_RGB_NATIVE_DMP,
                s_TextureWidth,
                s_TextureHeight,
                0,
                GL_RGB_NATIVE_DMP,
                GL_UNSIGNED_BYTE,  // GL_UNSIGNED_SHORT_5_5_5_1 when the Y2R output format is 16-bit RGB
                s_paRgbBuffer[index]);
            s_IsUpdateRgbImage[index] = false;
        }

        ReadyObjects();

        glUniform1i(glGetUniformLocation(s_ProgramID, "dmp_Texture[0].samplerType"), GL_TEXTURE_2D);
        glActiveTexture(GL_TEXTURE0);
        glBindTexture(GL_TEXTURE_2D, s_Texture[index]);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
        glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);

        SetTextureCombiner();

        nn::math::Matrix44 proj, m;

        // Determine the frustum.
        // Because the upper LCD is addressed with height and width reversed,
        //   DISPLAY0_HEIGHT : 400
        //   DISPLAY0_WIDTH  : 240
        // are the values used below.
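        // Concretely (added for clarity): the half-extents below are 0.02
        // horizontally and 0.02 * 400 / 240 = 0.0333... vertically in the
        // rotated frame, matching the 400:240 aspect ratio of the upper screen.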
        nn::math::MTX44Frustum(
            &proj,
            -0.02f,
            0.02f,
            -0.02f * static_cast<f32>(nn::gx::DISPLAY0_HEIGHT) / static_cast<f32>(nn::gx::DISPLAY0_WIDTH),
            0.02f * static_cast<f32>(nn::gx::DISPLAY0_HEIGHT) / static_cast<f32>(nn::gx::DISPLAY0_WIDTH),
            0.2f,
            10.f);
        nn::math::MTX44Transpose(&m, &proj);
        glUniformMatrix4fv(glGetUniformLocation(s_ProgramID, "uProjection"), 1, GL_FALSE, static_cast<f32*>(m));

        // Consider where to place the camera (viewpoint) so that the image is
        // displayed pixel-by-pixel. The object size is 2.048 x 2.048, and a
        // 512x512-pixel image is pasted onto it as a texture.
        // Because the screen size is 400x240, the pixel displayed at the
        // upper-left corner has the coordinates (-0.8, 0.48).
        // Because the upper-left corner of the near clipping plane has the
        // coordinates (-0.02*400/240, -0.02) and the near clipping plane is 0.2
        // from the camera along the z-axis, the distance from the camera to the
        // object along the z-axis is 4.8, from the ratio 0.48/0.02 = z/0.2.
        nn::math::Matrix34 eye;
        nn::math::Vector3 camPos(0.f, 0.f, 4.8f);
        nn::math::Vector3 camUp(0.f, 1.f, 0.f);
        nn::math::Vector3 target(0.f, 0.f, 0.f);
        nn::math::MTX34LookAt(&eye, &camPos, &camUp, &target);

        nn::math::MTX34 tmp(eye);
        {
            nn::math::MTX34 move;
            nn::math::MTX34Identity(&move);

            // Calibrate the stereo cameras (correct the position offsets).
            // Because of placement errors introduced when the stereo cameras are
            // mounted during manufacturing, the position of the camera subject
            // is offset between the left and right cameras. (The size of this
            // offset differs from system to system.) As a result, this offset
            // must always be corrected when rendering.
            nn::math::MTX34Mult(&move, &s_aCalMatrix[index], &move);

            // Rotate 90 degrees to the right, because the coordinate system is
            // defined by rotating the system, held horizontally, 90 degrees to the left.
            nn::math::MTX34 rot90;
            nn::math::MTX34RotXYZDeg(&rot90, 0.0f, 0.0f, -90.0f);
            nn::math::MTX34Mult(&move, &rot90, &move);

            nn::math::MTX34Mult(&tmp, &tmp, &move);
        }
        nn::math::MTX44 mv(tmp);
        nn::math::MTX44Transpose(&mv, &mv);
        glUniformMatrix4fv(glGetUniformLocation(s_ProgramID, "uModelView"), 1, GL_FALSE, static_cast<f32*>(mv));

        glDisable(GL_BLEND);
        // Disable the depth test
        glDisable(GL_DEPTH_TEST);

        glDrawElements(GL_TRIANGLE_STRIP, 4, GL_UNSIGNED_SHORT, 0);
    } //DrawCameraImage()
} //Namespace