Sample Code#

The following sections list the code samples that are part of the pylon SDK.

Location on Windows#

The pylon sample solutions can be found under <SDK ROOT>\Development\Samples\C++. Sample solutions are provided for Microsoft Visual Studio 2010. Later Visual Studio versions can convert a Visual Studio 2010 solution to the required format, e.g., to a Microsoft Visual Studio 2019 solution. Additionally, CMakeLists.txt files are available, with one central CMakeLists.txt file at <SDK ROOT>\Development\Samples that covers all samples.

Using Visual Studio 2015

Visual Studio 2015 doesn't have native CMake integration.

If CMake >= 3.14 isn't installed, download a CMake Windows installer from https://cmake.org/download, launch the .msi installer, and follow the instructions.

Location on Linux#

The pylon samples can be found under <SDK ROOT>/Samples. A GNU makefile is available for each sample.

Location on macOS#

The pylon samples can be found in the pylon Camera Software Suite, in the Samples folder. An Apple Xcode project is available for each sample. Before they can be used with Xcode, the pylon for macOS samples must be copied from the pylon Camera Software Suite to a writable location, e.g., your home directory.

Include Files Used by Samples#

The following include files are used by the samples shown below.

CameraEventPrinter.h#

This include file is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Contains a Camera Event Handler that prints a message for each event method call.

#ifndef INCLUDED_CAMERAEVENTPRINTER_H_4683453
#define INCLUDED_CAMERAEVENTPRINTER_H_4683453

#include <pylon/CameraEventHandler.h>
#include <pylon/ParameterIncludes.h>
#include <iostream>

namespace Pylon
{
    class CInstantCamera;

    class CCameraEventPrinter : public CCameraEventHandler
    {
    public:
        virtual void OnCameraEvent( CInstantCamera& camera, intptr_t userProvidedId, GenApi::INode* pNode )
        {
            std::cout << "OnCameraEvent event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
            std::cout << "User provided ID: " << userProvidedId << std::endl;
            std::cout << "Event data node name: " << pNode->GetName() << std::endl;
            CParameter value( pNode );
            if (value.IsValid())
            {
                std::cout << "Event node data: " << value.ToString() << std::endl;
            }
            std::cout << std::endl;
        }
    };
}

#endif /* INCLUDED_CAMERAEVENTPRINTER_H_4683453 */
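
A handler like this is registered for a specific event data node of the camera. The following minimal sketch (an illustration, not one of the SDK samples) registers the printer for the Exposure End data node; the node name "EventExposureEndData" assumes a camera based on SFNC 2.0 or later, and event notification still has to be enabled on the device, as shown in the Grab_CameraEvents sample below.

#include <pylon/PylonIncludes.h>
#include "CameraEventPrinter.h"

using namespace Pylon;

int main()
{
    PylonInitialize();
    {
        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Camera event processing must be activated first; the default is off.
        camera.GrabCameraEvents = true;

        // Print a message whenever Exposure End event data arrives.
        // The node name assumes a camera based on SFNC 2.0 or later.
        camera.RegisterCameraEventHandler( new CCameraEventPrinter, "EventExposureEndData", 100, RegistrationMode_Append, Cleanup_Delete );
    }
    PylonTerminate();
    return 0;
}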

ConfigurationEventPrinter.h#

This include file is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Contains a Configuration Event Handler that prints a message for each event method call.

#ifndef INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006
#define INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006

#include <pylon/ConfigurationEventHandler.h>
#include <iostream>

namespace Pylon
{
    class CInstantCamera;

    class CConfigurationEventPrinter : public CConfigurationEventHandler
    {
    public:
        void OnAttach( CInstantCamera& /*camera*/ )
        {
            std::cout << "OnAttach event" << std::endl;
        }

        void OnAttached( CInstantCamera& camera )
        {
            std::cout << "OnAttached event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnOpen( CInstantCamera& camera )
        {
            std::cout << "OnOpen event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnOpened( CInstantCamera& camera )
        {
            std::cout << "OnOpened event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnGrabStart( CInstantCamera& camera )
        {
            std::cout << "OnGrabStart event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnGrabStarted( CInstantCamera& camera )
        {
            std::cout << "OnGrabStarted event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnGrabStop( CInstantCamera& camera )
        {
            std::cout << "OnGrabStop event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnGrabStopped( CInstantCamera& camera )
        {
            std::cout << "OnGrabStopped event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnClose( CInstantCamera& camera )
        {
            std::cout << "OnClose event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnClosed( CInstantCamera& camera )
        {
            std::cout << "OnClosed event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnDestroy( CInstantCamera& camera )
        {
            std::cout << "OnDestroy event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnDestroyed( CInstantCamera& /*camera*/ )
        {
            std::cout << "OnDestroyed event" << std::endl;
        }

        void OnDetach( CInstantCamera& camera )
        {
            std::cout << "OnDetach event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnDetached( CInstantCamera& camera )
        {
            std::cout << "OnDetached event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }

        void OnGrabError( CInstantCamera& camera, const char* errorMessage )
        {
            std::cout << "OnGrabError event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
            std::cout << "Error Message: " << errorMessage << std::endl;
        }

        void OnCameraDeviceRemoved( CInstantCamera& camera )
        {
            std::cout << "OnCameraDeviceRemoved event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
        }
    };
}

#endif /* INCLUDED_CONFIGURATIONEVENTPRINTER_H_663006 */

ImageEventPrinter.h#

This include file is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Contains an Image Event Handler that prints a message for each event method call.

#ifndef INCLUDED_IMAGEEVENTPRINTER_H_7884943
#define INCLUDED_IMAGEEVENTPRINTER_H_7884943

#include <pylon/ImageEventHandler.h>
#include <pylon/GrabResultPtr.h>
#include <iostream>

namespace Pylon
{
    class CInstantCamera;

    class CImageEventPrinter : public CImageEventHandler
    {
    public:

        virtual void OnImagesSkipped( CInstantCamera& camera, size_t countOfSkippedImages )
        {
            std::cout << "OnImagesSkipped event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;
            std::cout << countOfSkippedImages << " images have been skipped." << std::endl;
            std::cout << std::endl;
        }


        virtual void OnImageGrabbed( CInstantCamera& camera, const CGrabResultPtr& ptrGrabResult )
        {
            std::cout << "OnImageGrabbed event for device " << camera.GetDeviceInfo().GetModelName() << std::endl;

            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
                std::cout << "SizeX: " << ptrGrabResult->GetWidth() << std::endl;
                std::cout << "SizeY: " << ptrGrabResult->GetHeight() << std::endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                std::cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << std::endl;
                std::cout << std::endl;
            }
            else
            {
                std::cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription() << std::endl;
            }
        }
    };
}

#endif /* INCLUDED_IMAGEEVENTPRINTER_H_7884943 */
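
A minimal usage sketch (an illustration, not one of the SDK samples): the handler is registered on an Instant Camera object, and RetrieveResult() then calls its OnImageGrabbed() method for every grab result.

#include <pylon/PylonIncludes.h>
#include "ImageEventPrinter.h"

using namespace Pylon;

int main()
{
    PylonInitialize();
    {
        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Print information about every grab result that is retrieved.
        camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete );

        camera.StartGrabbing( 10 );

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        while (camera.IsGrabbing())
        {
            // OnImageGrabbed() of all registered image event handlers is called from here.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );
        }
    }
    PylonTerminate();
    return 0;
}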

PixelFormatAndAoiConfiguration.h#

This include file is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Contains a configuration that sets pixel data format and Image AOI.

#ifndef INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928
#define INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928

#include <pylon/ConfigurationEventHandler.h>
#include <pylon/ParameterIncludes.h>

namespace Pylon
{
    class CInstantCamera;
}
class CPixelFormatAndAoiConfiguration : public Pylon::CConfigurationEventHandler
{
public:
    void OnOpened( Pylon::CInstantCamera& camera )
    {
        try
        {
            // Allow all the names in the namespace Pylon to be used without qualification.
            using namespace Pylon;

            // Get the camera control object.
            GenApi::INodeMap& nodemap = camera.GetNodeMap();

            // Get the parameters for setting the image area of interest (Image AOI).
            CIntegerParameter width( nodemap, "Width" );
            CIntegerParameter height( nodemap, "Height" );
            CIntegerParameter offsetX( nodemap, "OffsetX" );
            CIntegerParameter offsetY( nodemap, "OffsetY" );

            // Maximize the Image AOI.
            offsetX.TrySetToMinimum(); // Set to minimum if writable.
            offsetY.TrySetToMinimum(); // Set to minimum if writable.
            width.SetToMaximum();
            height.SetToMaximum();

            // Set the pixel data format.
            CEnumParameter( nodemap, "PixelFormat" ).SetValue( "Mono8" );
        }
        catch (const Pylon::GenericException& e)
        {
            throw RUNTIME_EXCEPTION( "Could not apply configuration. const GenericException caught in OnOpened method msg=%hs", e.what() );
        }
    }
};

#endif /* INCLUDED_PIXELFORMATANDAOICONFIGURATION_H_00104928 */
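
A minimal sketch of typical usage (an illustration, not one of the SDK samples): the configuration is registered before the camera is opened, and Open() then triggers its OnOpened() method, which applies the pixel format and AOI settings.

#include <pylon/PylonIncludes.h>
#include "PixelFormatAndAoiConfiguration.h"

using namespace Pylon;

int main()
{
    PylonInitialize();
    {
        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Append the configuration; the default configuration stays registered.
        camera.RegisterConfiguration( new CPixelFormatAndAoiConfiguration, RegistrationMode_Append, Cleanup_Delete );

        // OnOpened() is called as part of Open(). It maximizes the AOI and sets the Mono8 pixel format.
        camera.Open();
        camera.Close();
    }
    PylonTerminate();
    return 0;
}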

SampleImageCreator.h#

This include file is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Contains functions for creating sample images.

#ifndef INCLUDED_SAMPLEIMAGECREATOR_H_2792867
#define INCLUDED_SAMPLEIMAGECREATOR_H_2792867

#include <pylon/PylonImage.h>
#include <pylon/Pixel.h>
#include <pylon/ImageFormatConverter.h>

namespace SampleImageCreator
{
    Pylon::CPylonImage CreateJuliaFractal( Pylon::EPixelType pixelType, uint32_t width, uint32_t height )
    {
        // Allow all the names in the namespace Pylon to be used without qualification.
        using namespace Pylon;

        // Define Constants.
        static const SRGB8Pixel palette[] =
        {
            {0, 28, 50}, {0, 42, 75}, {0, 56, 100}, {0, 70, 125}, {0, 84, 150},
            {0, 50, 0}, {0, 100, 0}, {0, 150, 0}, {0, 200, 0}, {0, 250, 0},
            {50, 0, 0}, {100, 0, 0}, {150, 0, 0}, {200, 0, 0}, {250, 0, 0}
        };
        uint32_t numColors = sizeof( palette ) / sizeof( palette[0] );

        const double cX = -0.735;
        const double cY = 0.11;
        const double cMaxX = 1.6;
        const double cMinX = -1.6;
        const double cMaxY = 1;
        const double cMinY = -1;
        const uint32_t cMaxIterations = 50;

        // Create image.
        CPylonImage juliaFractal( CPylonImage::Create( PixelType_RGB8packed, width, height ) );

        // Get the pointer to the first pixel.
        SRGB8Pixel* pCurrentPixel = (SRGB8Pixel*) juliaFractal.GetBuffer();

        // Compute the fractal.
        for (uint32_t pixelY = 0; pixelY < height; ++pixelY)
        {
            for (uint32_t pixelX = 0; pixelX < width; ++pixelX, ++pCurrentPixel)
            {
                long double x = ((cMaxX - cMinX) / width) * pixelX + cMinX;
                long double y = cMaxY - pixelY * ((cMaxY - cMinY) / height);
                long double xd = 0;
                long double yd = 0;
                uint32_t i = 0;

                for (; i < cMaxIterations; ++i)
                {
                    xd = x * x - y * y + cX;
                    yd = 2 * x * y + cY;
                    x = xd;
                    y = yd;
                    if ((x * x + y * y) > 4)
                    {
                        break;
                    }
                }

                if (i >= cMaxIterations)
                {
                    *pCurrentPixel = palette[0];
                }
                else
                {
                    *pCurrentPixel = palette[i % numColors];
                }
            }
        }

        // Convert the image to the target format if needed.
        if (juliaFractal.GetPixelType() != pixelType)
        {
            CImageFormatConverter converter;
            converter.OutputPixelFormat = pixelType;
            converter.OutputBitAlignment = OutputBitAlignment_MsbAligned;
            converter.Convert( juliaFractal, CPylonImage( juliaFractal ) );
        }

        // Return the image.
        return juliaFractal;
    }


    Pylon::CPylonImage CreateMandelbrotFractal( Pylon::EPixelType pixelType, uint32_t width, uint32_t height )
    {
        // Allow all the names in the namespace Pylon to be used without qualification.
        using namespace Pylon;

        // Define constants.
        static const SRGB8Pixel palette[] =
        {
            {0, 28, 50}, {0, 42, 75}, {0, 56, 100}, {0, 70, 125}, {0, 84, 150},
            {0, 50, 0}, {0, 100, 0}, {0, 150, 0}, {0, 200, 0}, {0, 250, 0},
            {50, 0, 0}, {100, 0, 0}, {150, 0, 0}, {200, 0, 0}, {250, 0, 0}
        };
        uint32_t numColors = sizeof( palette ) / sizeof( palette[0] );

        const double  cMaxX = 1.0;
        const double  cMinX = -2.0;
        const double  cMaxY = 1.2;
        const double  cMinY = -1.2;
        const uint32_t cMaxIterations = 50;

        // Create image.
        CPylonImage mandelbrotFractal( CPylonImage::Create( PixelType_RGB8packed, width, height ) );

        // Get the pointer to the first pixel.
        SRGB8Pixel* pCurrentPixel = (SRGB8Pixel*) mandelbrotFractal.GetBuffer();

        // Compute the fractal.
        for (uint32_t pixelY = 0; pixelY < height; ++pixelY)
        {
            for (uint32_t pixelX = 0; pixelX < width; ++pixelX, ++pCurrentPixel)
            {
                long double xStart = ((cMaxX - cMinX) / width) * pixelX + cMinX;
                long double yStart = cMaxY - pixelY * ((cMaxY - cMinY) / height);
                long double x = xStart;
                long double y = yStart;
                long double xd = 0;
                long double yd = 0;
                uint32_t i = 0;

                for (; i < cMaxIterations; ++i)
                {
                    xd = x * x - y * y + xStart;
                    yd = 2 * x * y + yStart;
                    x = xd;
                    y = yd;
                    if ((x * x + y * y) > 4)
                    {
                        break;
                    }
                }

                if (i >= cMaxIterations)
                {
                    *pCurrentPixel = palette[0];
                }
                else
                {
                    *pCurrentPixel = palette[i % numColors];
                }
            }
        }

        // Convert the image to the target format if needed.
        if (mandelbrotFractal.GetPixelType() != pixelType)
        {
            CImageFormatConverter converter;
            converter.OutputPixelFormat = pixelType;
            converter.OutputBitAlignment = OutputBitAlignment_MsbAligned;
            converter.Convert( mandelbrotFractal, CPylonImage( mandelbrotFractal ) );
        }

        // Return the image.
        return mandelbrotFractal;
    }

}

#endif /* INCLUDED_SAMPLEIMAGECREATOR_H_2792867 */
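
The creator functions can be used wherever a test image is needed, e.g., when no camera device is available. A minimal sketch (an illustration, not one of the SDK samples; the file name is an arbitrary example):

#include <pylon/PylonIncludes.h>
#include "SampleImageCreator.h"

using namespace Pylon;

int main()
{
    PylonInitialize();
    {
        // Create a 640 x 480 Mono8 test image. The creator functions render in RGB8
        // and convert to the requested pixel type if necessary.
        CPylonImage image = SampleImageCreator::CreateMandelbrotFractal( PixelType_Mono8, 640, 480 );

        // Save the image to disk; the file name is an arbitrary example.
        CImagePersistence::Save( ImageFileFormat_Tiff, "MandelbrotFractal.tiff", image );
    }
    PylonTerminate();
    return 0;
}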

DeviceRemovalHandling#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// DeviceRemovalHandling.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample program demonstrates how to be informed about the removal of a camera device.
    It also shows how to reconnect to a removed device.

    Attention:
    If you build this sample in debug mode and run it using a GigE camera device, pylon will set the heartbeat
    timeout to 60 minutes. This is done to allow debugging and single-stepping without losing the camera
    connection due to missing heartbeats. However, with this setting, it would take 60 minutes for the
    application to notice that a GigE device has been disconnected.
    As a workaround, the heartbeat timeout is set to 1000 ms.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#include "../include/ConfigurationEventPrinter.h"

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;


// When using device-specific Instant Camera classes, specific configuration event handler classes are available, e.g.,
// Pylon::CBaslerUniversalConfigurationEventHandler.
// Example of a configuration event handler that handles device removal events.
class CSampleConfigurationEventHandler : public Pylon::CConfigurationEventHandler
{
public:
    // This method is called from a different thread when the camera device removal has been detected.
    void OnCameraDeviceRemoved( CInstantCamera& /*camera*/ )
    {
        // Print two new lines, just for improving printed output.
        cout << endl << endl;
        cout << "CSampleConfigurationEventHandler::OnCameraDeviceRemoved called." << std::endl;
    }
};

// Time to wait, in quarter-second units (60 seconds in total).
static const uint32_t c_loopCounterInitialValue = 60 * 4;

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Declare a local counter used for waiting.
        int loopCount = 0;

        // Get the transport layer factory.
        CTlFactory& tlFactory = CTlFactory::GetInstance();

        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( tlFactory.CreateFirstDevice() );

        // Print the camera information.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
        cout << "Friendly Name: " << camera.GetDeviceInfo().GetFriendlyName() << endl;
        cout << "Full Name    : " << camera.GetDeviceInfo().GetFullName() << endl;
        cout << "SerialNumber : " << camera.GetDeviceInfo().GetSerialNumber() << endl;
        cout << endl;

        // For demonstration purposes only, register another configuration event handler that handles device removal.
        camera.RegisterConfiguration( new CSampleConfigurationEventHandler, RegistrationMode_Append, Cleanup_Delete );

        // For demonstration purposes only, add a sample configuration event handler to print out information
        // about camera use.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete );

        // Open the camera. Camera device removal is only detected while the camera is open.
        camera.Open();

        // Now, try to detect that the camera has been removed:

        // Ask the user to disconnect a device
        loopCount = c_loopCounterInitialValue;
        cout << endl << "Please disconnect the device (timeout " << loopCount / 4 << "s) " << endl;

        /////////////////////////////////////////////////// don't single step beyond this line  (see comments above)

        // Before testing the callbacks, we manually set the heartbeat timeout to a short value when using GigE cameras.
        // Since for debug builds the heartbeat timeout has been set to 60 minutes (see the note at the top of this file),
        // it would otherwise take up to 60 minutes until the device removal is detected.
        CIntegerParameter heartbeat( camera.GetTLNodeMap(), "HeartbeatTimeout" );
        heartbeat.TrySetValue( 1000, IntegerValueCorrection_Nearest );  // set to 1000 ms timeout if writable

        try
        {
            // Get a camera parameter using generic parameter access.
            CIntegerParameter width( camera.GetNodeMap(), "Width" );

            // The following loop accesses the camera. It could also be a loop that is
            // grabbing images. The device removal is handled in the exception handler.
            while (loopCount > 0)
            {
                // Print a "." every second to tell the user we're waiting for the callback.
                if (--loopCount % 4 == 0)
                {
                    cout << ".";
                    cout.flush();
                }
                WaitObject::Sleep( 250 );

                // Change the width value in the camera depending on the loop counter.
                // Any access to the camera like setting parameters or grabbing images
                // will fail throwing an exception if the camera has been disconnected.
                width.SetValue( width.GetMax() - (width.GetInc() * (loopCount % 2)) );
            }

        }
        catch (const GenericException& e)
        {
            // An exception occurred. Is it because the camera device has been physically removed?

            // Known issue: Wait until the system safely detects a possible removal.
            WaitObject::Sleep( 1000 );

            if (camera.IsCameraDeviceRemoved())
            {
                // The camera device has been removed. This caused the exception.
                cout << endl;
                cout << "The camera has been removed from the computer." << endl;
                cout << "The camera device removal triggered an expected exception:" << endl
                    << e.GetDescription() << endl;
            }
            else
            {
                // An unexpected error has occurred.

                // In this example it is handled by exiting the program.
                throw;
            }
        }

        if (!camera.IsCameraDeviceRemoved())
            cout << endl << "Timeout expired" << endl;

        /////////////////////////////////////////////////// Safe to use single stepping (see comments above).

        // Now try to find the detached camera after it has been attached again:

        // Create a device info object for remembering the camera properties.
        CDeviceInfo info;

        // Remember the camera properties that allow detecting the same camera again.
        info.SetDeviceClass( camera.GetDeviceInfo().GetDeviceClass() );
        info.SetSerialNumber( camera.GetDeviceInfo().GetSerialNumber() );

        // Destroy the Pylon Device representing the detached camera device.
        // It can't be used anymore.
        camera.DestroyDevice();

        // Ask the user to connect the same device.
        loopCount = c_loopCounterInitialValue;
        cout << endl << "Please connect the same device to the computer again (timeout " << loopCount / 4 << "s) " << endl;

        // Create a filter containing the CDeviceInfo object info which describes the properties of the device we are looking for.
        DeviceInfoList_t filter;
        filter.push_back( info );

        for (; loopCount > 0; --loopCount)
        {
            // Print a "." every second to tell the user we're waiting for the camera to be attached.
            if (loopCount % 4 == 0)
            {
                cout << ".";
                cout.flush();
            }

            // Try to find the camera we are looking for.
            DeviceInfoList_t devices;
            if (tlFactory.EnumerateDevices( devices, filter ) > 0)
            {
                // Print two new lines, just for improving printed output.
                cout << endl << endl;

                // The camera has been found. Create and attach it to the Instant Camera object.
                camera.Attach( tlFactory.CreateDevice( devices[0] ) );
                // Exit the waiting loop.
                break;
            }

            WaitObject::Sleep( 250 );
        }

        // If the camera has been found.
        if (camera.IsPylonDeviceAttached())
        {
            // Print the camera information.
            cout << endl;
            cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;
            cout << "Friendly Name: " << camera.GetDeviceInfo().GetFriendlyName() << endl;
            cout << "Full Name    : " << camera.GetDeviceInfo().GetFullName() << endl;
            cout << "SerialNumber : " << camera.GetDeviceInfo().GetSerialNumber() << endl;
            cout << endl;

            // All configuration objects and other event handler objects are still registered.
            // The configuration objects will parameterize the camera device and the instant
            // camera will be ready for operation again.

            // Open the camera.
            camera.Open();

            // Now the Instant Camera object can be used as before.
        }
        else // Timeout
        {
            cout << endl << "Timeout expired." << endl;
        }
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample illustrates how to grab and process images using the CInstantCamera class.
    The images are grabbed and processed asynchronously, i.e.,
    while the application is processing a buffer, the acquisition of the next buffer is done
    in parallel.

    The CInstantCamera class uses a pool of buffers to retrieve image data
    from the camera device. Once a buffer is filled and ready,
    the buffer can be retrieved from the camera object for processing. The buffer
    and additional image data are collected in a grab result. The grab result is
    held by a smart pointer after retrieval. The buffer is automatically reused
    when explicitly released or when the smart pointer object is destroyed.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 100;

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Print the model name of the camera.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;

        // The parameter MaxNumBuffer can be used to control the count of buffers
        // allocated for grabbing. The default value of this parameter is 10.
        camera.MaxNumBuffer = 5;

        // Start the grabbing of c_countOfImagesToGrab images.
        // The camera device is parameterized with a default configuration which
        // sets up free-running continuous acquisition.
        camera.StartGrabbing( c_countOfImagesToGrab );

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved.
        while (camera.IsGrabbing())
        {
            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );

            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
                // Access the image data.
                cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
                cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;

#ifdef PYLON_WIN_BUILD
                // Display the grabbed image.
                Pylon::DisplayImage( 1, ptrGrabResult );
#endif
            }
            else
            {
                cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription() << endl;
            }
        }
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_CameraEvents#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_CameraEvents.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample shows how to register event handlers that indicate the arrival of events
    sent by the camera. For demonstration purposes, several different handlers are registered
    for the same event.

    Basler USB3 Vision and GigE Vision cameras can send event messages. For example, when a sensor
    exposure has finished, the camera can send an Exposure End event to the computer. The event
    can be received by the computer before the image data of the finished exposure has been transferred
    completely. This sample demonstrates how to be notified when camera event message data is received.

    The event messages are automatically retrieved and processed by the InstantCamera classes.
    The information carried by event messages is exposed as parameter nodes in the camera node map
    and can be accessed like standard camera parameters. These nodes are updated
    when a camera event is received. You can register camera event handler objects that are
    triggered when event data has been received.

    These mechanisms are demonstrated for the Exposure End and the Event Overrun events.
    The Exposure End event carries the following information:
    * ExposureEndEventFrameID: Number of the image that has been exposed.
    * ExposureEndEventTimestamp: Time when the event was generated.
    The Event Overrun event is sent by the camera as a warning that events are being dropped. The
    notification contains no specific information about how many or which events have been dropped.
    Events may be dropped if events are generated at a high frequency and if there isn't enough
    bandwidth available to send the events.

    Note: Different camera series implement different versions of the Standard Feature Naming Convention (SFNC).
    As a result, the names and types of the parameters used can differ.
*/


// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>

// Include file to use pylon universal instant camera parameters.
#include <pylon/BaslerUniversalInstantCamera.h>

// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/CameraEventPrinter.h"

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using pylon universal instant camera parameters.
using namespace Basler_UniversalCameraParams;

// Namespace for using cout.
using namespace std;

// Enumeration used to distinguish different events.
enum MyEvents
{
    eMyExposureEndEvent = 100,
    eMyEventOverrunEvent = 200
    // More events can be added here.
};

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;


// Example handler for camera events.
class CSampleCameraEventHandler : public CBaslerUniversalCameraEventHandler
{
public:
    // Only very short processing tasks should be performed by this method. Otherwise, the event notification will block the
    // processing of images.
    virtual void OnCameraEvent( CBaslerUniversalInstantCamera& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */ )
    {
        std::cout << std::endl;
        switch (userProvidedId)
        {
            case eMyExposureEndEvent: // Exposure End event
                if (camera.EventExposureEndFrameID.IsReadable()) // Applies to cameras based on SFNC 2.0 or later, e.g., USB cameras
                {
                    cout << "Exposure End event. FrameID: " << camera.EventExposureEndFrameID.GetValue() << " Timestamp: " << camera.EventExposureEndTimestamp.GetValue() << std::endl << std::endl;
                }
                else
                {
                    cout << "Exposure End event. FrameID: " << camera.ExposureEndEventFrameID.GetValue() << " Timestamp: " << camera.ExposureEndEventTimestamp.GetValue() << std::endl << std::endl;
                }
                break;
            case eMyEventOverrunEvent:  // Event Overrun event
                cout << "Event Overrun event. FrameID: " << camera.EventOverrunEventFrameID.GetValue() << " Timestamp: " << camera.EventOverrunEventTimestamp.GetValue() << std::endl << std::endl;
                break;
        }
    }
};

// Example of an image event handler.
class CSampleImageEventHandler : public CImageEventHandler
{
public:
    virtual void OnImageGrabbed( CInstantCamera& /*camera*/, const CGrabResultPtr& /*ptrGrabResult*/ )
    {
        cout << "CSampleImageEventHandler::OnImageGrabbed called." << std::endl;
        cout << std::endl;
        cout << std::endl;
    }
};

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    // Create an example event handler. In this case, a single camera event handler handles multiple camera events.
    // The handler prints a message for each received event.
    CSampleCameraEventHandler* pHandler1 = new CSampleCameraEventHandler;

    // Create another more generic event handler printing out information about the node for which an event callback
    // is fired.
    CCameraEventPrinter* pHandler2 = new CCameraEventPrinter;

    try
    {
        // Create an instant camera object with the first found camera device.
        CBaslerUniversalInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Register the standard configuration event handler for enabling software triggering.
        // The software trigger configuration handler replaces the default configuration
        // as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
        camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete );

        // For demonstration purposes only, registers an event handler configuration to print out information about camera use.
        // The event handler configuration is appended to the registered software trigger configuration handler by setting 
        // registration mode to RegistrationMode_Append.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete ); // Camera use.

        // For demonstration purposes only, register another image event handler.
        camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete );

        // Camera event processing must be activated first, the default is off.
        camera.GrabCameraEvents = true;


        // Open the camera for setting parameters.
        camera.Open();

        // Check if the device supports events.
        if (!camera.EventSelector.IsWritable())
        {
            throw RUNTIME_EXCEPTION( "The device doesn't support events." );
        }



        // Cameras based on SFNC 2.0 or later, e.g., USB cameras
        if (camera.GetSfncVersion() >= Sfnc_2_0_0)
        {
            // Register an event handler for the Exposure End event. For each event type, there is a "data" node
            // representing the event. The actual data that is carried by the event is held by child nodes of the
            // data node. In the case of the Exposure End event, the child nodes are EventExposureEndFrameID and EventExposureEndTimestamp.
            // The CSampleCameraEventHandler demonstrates how to access the child nodes within
            // a callback that is fired for the parent data node.
            // The user-provided ID eMyExposureEndEvent can be used to distinguish between multiple events (not shown).
            camera.RegisterCameraEventHandler( pHandler1, "EventExposureEndData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None );
            // The handler is registered for both the EventExposureEndFrameID and the EventExposureEndTimestamp
            // nodes. These nodes represent the data carried by the Exposure End event.
            // For each Exposure End event received, the handler will be called twice, once for the frame ID, and
            // once for the time stamp.
            camera.RegisterCameraEventHandler( pHandler2, "EventExposureEndFrameID", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None );
            camera.RegisterCameraEventHandler( pHandler2, "EventExposureEndTimestamp", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None );
        }
        else
        {
            // Register an event handler for the Exposure End event. For each event type, there is a "data" node
            // representing the event. The actual data that is carried by the event is held by child nodes of the
            // data node. In the case of the Exposure End event, the child nodes are ExposureEndEventFrameID, ExposureEndEventTimestamp,
            // and ExposureEndEventStreamChannelIndex. The CSampleCameraEventHandler demonstrates how to access the child nodes within
            // a callback that is fired for the parent data node.
            camera.RegisterCameraEventHandler( pHandler1, "ExposureEndEventData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None );

            // Register the same handler for a second event. The user-provided ID can be used
            // to distinguish between the events.
            camera.RegisterCameraEventHandler( pHandler1, "EventOverrunEventData", eMyEventOverrunEvent, RegistrationMode_Append, Cleanup_None );

            // The handler is registered for both the ExposureEndEventFrameID and the ExposureEndEventTimestamp
            // nodes. These nodes represent the data carried by the Exposure End event.
            // For each Exposure End event received, the handler will be called twice, once for the frame ID, and
            // once for the time stamp.
            camera.RegisterCameraEventHandler( pHandler2, "ExposureEndEventFrameID", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None );
            camera.RegisterCameraEventHandler( pHandler2, "ExposureEndEventTimestamp", eMyExposureEndEvent, RegistrationMode_Append, Cleanup_None );
        }

        // Enable sending of Exposure End events.
        // Select the event to receive.
        camera.EventSelector.SetValue( EventSelector_ExposureEnd );

        // Enable it.
        if (!camera.EventNotification.TrySetValue( EventNotification_On ))
        {
            // scout-f, scout-g, and aviator GigE cameras use a different value
            camera.EventNotification.SetValue( EventNotification_GenICamEvent );
        }


        // Enable event notification for the EventOverrun event, if available
        if (camera.EventSelector.TrySetValue( EventSelector_EventOverrun ))
        {
            // Enable it.
            if (!camera.EventNotification.TrySetValue( EventNotification_On ))
            {
                // scout-f, scout-g, and aviator GigE cameras use a different value
                camera.EventNotification.SetValue( EventNotification_GenICamEvent );
            }
        }


        // Start the grabbing of c_countOfImagesToGrab images.
        camera.StartGrabbing( c_countOfImagesToGrab );

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved.
        while (camera.IsGrabbing())
        {
            // Execute the software trigger. Wait up to 1000 ms for the camera to be ready for trigger.
            if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException ))
            {
                camera.ExecuteSoftwareTrigger();
            }

            // Retrieve grab results and notify the camera event and image event handlers.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );
            // Nothing to do here with the grab result, the grab results are handled by the registered event handler.
        }

        // Disable sending Exposure End events.
        camera.EventSelector.SetValue( EventSelector_ExposureEnd );
        camera.EventNotification.SetValue( EventNotification_Off );

        // Disable sending Event Overrun events.
        if (camera.EventSelector.TrySetValue( EventSelector_EventOverrun ))
        {
            camera.EventNotification.SetValue( EventNotification_Off );
        }
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Delete the event handlers.
    delete pHandler1;
    delete pHandler2;

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_ChunkImage#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_ChunkImage.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    Basler cameras provide chunk features: The cameras can generate certain information about each image,
    e.g., frame counters, timestamps, and CRC checksums, that is appended to the image data as data "chunks".
    This sample illustrates how to enable chunk features, how to grab images and how to process the appended
    data. When the camera is in chunk mode, it transfers data blocks that are partitioned into chunks. The first
    chunk is always the image data. When chunk features are enabled, the image data chunk is followed by chunks
    containing the information generated by the chunk features.
*/

// Include files to use the pylon API
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

// Include file to use pylon universal instant camera parameters.
#include <pylon/BaslerUniversalInstantCamera.h>

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using pylon universal instant camera parameters.
using namespace Basler_UniversalCameraParams;

// Namespace for using cout.
using namespace std;


// Example of a device-specific handler for image events.
class CSampleImageEventHandler : public CBaslerUniversalImageEventHandler
{
public:
    virtual void OnImageGrabbed( CBaslerUniversalInstantCamera& /*camera*/, const CBaslerUniversalGrabResultPtr& ptrGrabResult )
    {
        // Image grabbed successfully?
        if (ptrGrabResult->GrabSucceeded())
        {
            // The chunk data is attached to the grab result and can be accessed anywhere.

            // Generic parameter access:
            // This shows the access via the chunk data node map. This method is available for all grab result types.
            CIntegerParameter chunkTimestamp( ptrGrabResult->GetChunkDataNodeMap(), "ChunkTimestamp" );

            // Access the chunk data attached to the result.
            // Before accessing the chunk data, you should check to see
            // if the chunk is readable. When it is readable, the buffer
            // contains the requested chunk data.
            if (chunkTimestamp.IsReadable())
                cout << "OnImageGrabbed: TimeStamp (Result) accessed via node map: " << chunkTimestamp.GetValue() << endl;

            // Native parameter access:
            // When using the device-specific grab results the chunk data can be accessed
            // via the members of the grab result data.
            if (ptrGrabResult->ChunkTimestamp.IsReadable())
                cout << "OnImageGrabbed: TimeStamp (Result) accessed via result member: " << ptrGrabResult->ChunkTimestamp.GetValue() << endl;
        }
    }
};

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Create an instant camera object with the first found camera device.
        CBaslerUniversalInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Print the model name of the camera.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;

        // Register an image event handler that accesses the chunk data.
        camera.RegisterImageEventHandler( new CSampleImageEventHandler, RegistrationMode_Append, Cleanup_Delete );

        // Open the camera.
        camera.Open();

        // Accessing chunk data requires a GenICam node map. Therefore, a small node map is created for each grab result.
        // Creating many node maps can be time-consuming.
        // The node maps are usually created dynamically when StartGrabbing() is called.
        // To avoid a delay caused by node map creation in StartGrabbing() you have the option to create
        // a static pool of node maps once before grabbing.
        //camera.StaticChunkNodeMapPoolSize = camera.MaxNumBuffer.GetValue();

        // Enable chunks in general.
        if (!camera.ChunkModeActive.TrySetValue( true ))
        {
            throw RUNTIME_EXCEPTION( "The camera doesn't support chunk features" );
        }

        // Enable time stamp chunks.
        camera.ChunkSelector.SetValue( ChunkSelector_Timestamp );
        camera.ChunkEnable.SetValue( true );

        // Enable frame counter chunks, if available.
        if (camera.ChunkSelector.TrySetValue( ChunkSelector_Framecounter ))
        {
            // USB camera devices provide generic counters.
            // An explicit FrameCounter value is not provided by USB camera devices.
            // Enable frame counter chunks.
            camera.ChunkEnable.SetValue( true );
        }

        // Enable CRC checksum chunks.
        camera.ChunkSelector.SetValue( ChunkSelector_PayloadCRC16 );
        camera.ChunkEnable.SetValue( true );

        // Start the grabbing of c_countOfImagesToGrab images.
        // The camera device is parameterized with a default configuration which
        // sets up free-running continuous acquisition.
        camera.StartGrabbing( c_countOfImagesToGrab );

        // This smart pointer will receive the grab result data.
        CBaslerUniversalGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved.
        while (camera.IsGrabbing())
        {
            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
            // RetrieveResult calls the image event handler's OnImageGrabbed method.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );
            cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;

            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
#ifdef PYLON_WIN_BUILD
                // Display the image
                Pylon::DisplayImage( 1, ptrGrabResult );
#endif

                // The result data is automatically filled with received chunk data.
                // (Note: This is not the case when using the low-level API.)
                cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
                cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl;

                // Check to see if a buffer containing chunk data has been received.
                if (PayloadType_ChunkData != ptrGrabResult->GetPayloadType())
                {
                    throw RUNTIME_EXCEPTION( "Unexpected payload type received." );
                }

                // Since we have activated the CRC Checksum feature, we can check
                // the integrity of the buffer first.
                // Note: Enabling the CRC Checksum feature is not a prerequisite for using
                // chunks. Chunks can also be handled when the CRC Checksum feature is deactivated.
                if (ptrGrabResult->HasCRC() && ptrGrabResult->CheckCRC() == false)
                {
                    throw RUNTIME_EXCEPTION( "Image was damaged!" );
                }

                // Access the chunk data attached to the result.
                // Before accessing the chunk data, you should check to see
                // if the chunk is readable. When it is readable, the buffer
                // contains the requested chunk data.
                if (ptrGrabResult->ChunkTimestamp.IsReadable())
                {
                    cout << "TimeStamp (Result): " << ptrGrabResult->ChunkTimestamp.GetValue() << endl;
                }

                // USB camera devices provide generic counters. An explicit FrameCounter value is not provided by USB camera devices.
                if (ptrGrabResult->ChunkFramecounter.IsReadable())
                {
                    cout << "FrameCounter (Result): " << ptrGrabResult->ChunkFramecounter.GetValue() << endl;
                }

                cout << endl;
            }
            else
            {
                cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription() << endl;
            }
        }

        // Disable chunk mode.
        camera.ChunkModeActive.SetValue( false );
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_MultiCast#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_MultiCast.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample demonstrates how to open a camera in multicast mode
    and how to receive a multicast stream.

    Two instances of this application must be started simultaneously on different computers.
    The first application started on computer A acts as the controlling application and has full access to the GigE camera.
    The second instance started on computer B opens the camera in monitor mode.
    This instance is not able to control the camera but can receive multicast streams.

    To get the sample running, start this application first on computer A in control mode.
    After computer A has begun to receive frames, start the second instance of this
    application on computer B in monitor mode.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

// Include file to use pylon universal instant camera parameters.
#include <pylon/BaslerUniversalInstantCamera.h>

// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"

// Include file for _kbhit
#if defined(PYLON_WIN_BUILD)
#include <conio.h>
#elif defined(PYLON_UNIX_BUILD)
#    include <stdio.h>
#    include <termios.h>
#    include <unistd.h>
#    include <fcntl.h>
#endif

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Namespace for using pylon universal instant camera parameters.
using namespace Basler_UniversalCameraParams;
using namespace Basler_UniversalStreamParams;

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 100;


bool KeyPressed( void )
{
#if defined(PYLON_WIN_BUILD)
    return _kbhit() != 0;
#elif defined(PYLON_UNIX_BUILD)
    struct termios savedTermios;
    int savedFL;
    struct termios termios;
    int ch;

    tcgetattr( STDIN_FILENO, &savedTermios );
    savedFL = fcntl( STDIN_FILENO, F_GETFL, 0 );

    termios = savedTermios;
    termios.c_lflag &= ~(ICANON | ECHO);
    tcsetattr( STDIN_FILENO, TCSANOW, &termios );
    fcntl( STDIN_FILENO, F_SETFL, savedFL | O_NONBLOCK );

    ch = getchar();

    fcntl( STDIN_FILENO, F_SETFL, savedFL );
    tcsetattr( STDIN_FILENO, TCSANOW, &savedTermios );

    if (ch != EOF)
    {
        ungetc( ch, stdin );
    }

    return ch != EOF;
#endif
}

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    // Query the user for the mode to use.
    // Ask the user to launch the multicast controlling application or the multicast monitoring application.
    cout << "Start multicast sample in (c)ontrol or in (m)onitor mode? (c/m) "; cout.flush();

    char key;

    do
    {
        cin.get( key );
        // Remove newline from stdin.
        cin.get();
    }
    while ((key != 'c') && (key != 'm') && (key != 'C') && (key != 'M'));

    bool monitorMode = (key == 'm') || (key == 'M');

    try
    {
        // Only look for GigE cameras.
        CDeviceInfo info;
        info.SetDeviceClass( Pylon::BaslerGigEDeviceClass );

        // Create an instant camera object for the GigE camera found first.
        CBaslerUniversalInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice( info ) );

        // The default configuration must be removed when monitor mode is selected
        // because the monitoring application is not allowed to modify any parameter settings.
        if (monitorMode)
        {
            camera.RegisterConfiguration( (CConfigurationEventHandler*) NULL, RegistrationMode_ReplaceAll, Cleanup_None );
        }

        // For demonstration purposes only, registers an event handler configuration to print out information about camera use.
        // The event handler configurations are appended to the already registered configuration handlers by setting the
        // registration mode to RegistrationMode_Append.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete ); // Camera use.
        camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete );     // Image grabbing.

        // Print the model name of the camera.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;

        // Monitor mode selected.
        if (monitorMode)
        {
            // Set MonitorModeActive to true to act as monitor
            camera.MonitorModeActive = true;

            // Open the camera.
            camera.Open();

            // Select transmission type. If the camera is already controlled by another application
            // and configured for multicast, the active camera configuration can be used
            // (IP Address and Port will be set automatically).
            camera.GetStreamGrabberParams().TransmissionType = TransmissionType_UseCameraConfig;

            // Alternatively, the stream grabber could be explicitly set to "multicast"...
            // In this case, the IP Address and the IP port must also be set.
            //
            //camera.GetStreamGrabberParams().TransmissionType = TransmissionType_Multicast;
            //camera.GetStreamGrabberParams().DestinationAddr = "239.0.0.1";
            //camera.GetStreamGrabberParams().DestinationPort = 49152;

            if (camera.GetStreamGrabberParams().DestinationAddr.GetValue() != "0.0.0.0" &&
                 camera.GetStreamGrabberParams().DestinationPort.GetValue() != 0)
            {
                camera.StartGrabbing( c_countOfImagesToGrab );
            }
            else
            {
                cerr << endl << "Failed to open stream grabber (monitor mode): The acquisition is not yet started by the controlling application." << endl;
                cerr << endl << "Start the controlling application before starting the monitor application" << endl;
            }
        }
        // Controlling mode selected.
        else
        {
            // Open the camera.
            camera.Open();

            // Set transmission type to "multicast"...
            // In this case, the IP Address and the IP port must also be set.
            camera.GetStreamGrabberParams().TransmissionType = TransmissionType_Multicast;
            // camera.GetStreamGrabberParams().DestinationAddr = "239.0.0.1";    // These are default values.
            // camera.GetStreamGrabberParams().DestinationPort = 49152;

            // Maximize the image area of interest (Image AOI).
            camera.OffsetX.TrySetToMinimum();
            camera.OffsetY.TrySetToMinimum();
            camera.Width.SetToMaximum();
            camera.Height.SetToMaximum();

            // Set the pixel data format.
            camera.PixelFormat.SetValue( PixelFormat_Mono8 );

            camera.StartGrabbing();
        }

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved in monitor mode
        // or when a key is pressed and the camera object is destroyed.
        while (!KeyPressed() && camera.IsGrabbing())
        {
            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );

#ifdef PYLON_WIN_BUILD
            // Display the image
            Pylon::DisplayImage( 1, ptrGrabResult );
#endif

            // The grab result could now be processed here.
        }
    }
    catch (const GenericException& e)
    {
        // Error handling
        cerr << "An exception occurred." << endl << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_MultipleCameras#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_MultipleCameras.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample illustrates how to grab and process images from multiple cameras
    using the CInstantCameraArray class. The CInstantCameraArray class represents
    an array of instant camera objects. It provides almost the same interface
    as the instant camera for grabbing.
    The main purpose of the CInstantCameraArray is to simplify waiting for images and
    camera events of multiple cameras in one thread. This is done by providing a single
    RetrieveResult method for all cameras in the array.
    Alternatively, the grabbing can be started using the internal grab loop threads
    of all cameras in the CInstantCameraArray. The grabbed images can then be processed by one or more
    image event handlers. Please note that this is not shown in this example.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 10;

// Limits the number of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with multiple cameras.
// This applies, for instance, if two GigE cameras are connected to the same network adapter via a switch.
// To manage the bandwidth, the GevSCPD interpacket delay parameter and the GevSCFTD transmission delay
// parameter can be set for each GigE camera device.
// The "Controlling Packet Transmission Timing with the Interpacket and Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Notes (AW000649xx000)
// provide more information about this topic.
// The bandwidth used by a GigE camera device can be limited by adjusting the packet size.
static const size_t c_maxCamerasToUse = 2;
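
// A minimal sketch of such bandwidth tuning (assumption: GigE camera devices; the
// parameter names GevSCPSPacketSize and GevSCPD follow the GigE Vision SFNC and may
// differ for your camera model). After opening a camera, one could set, e.g.:
//
//   CIntegerParameter packetSize( cameras[i].GetNodeMap(), "GevSCPSPacketSize" );
//   packetSize.TrySetValue( 1500 );   // Limit the packet size.
//   CIntegerParameter interpacketDelay( cameras[i].GetNodeMap(), "GevSCPD" );
//   interpacketDelay.TrySetValue( 1000 );   // Interpacket delay in device ticks.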

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Get the transport layer factory.
        CTlFactory& tlFactory = CTlFactory::GetInstance();

        // Get all attached devices and exit application if no device is found.
        DeviceInfoList_t devices;
        if (tlFactory.EnumerateDevices( devices ) == 0)
        {
            throw RUNTIME_EXCEPTION( "No camera present." );
        }

        // Create an array of instant cameras for the found devices and avoid exceeding a maximum number of devices.
        CInstantCameraArray cameras( min( devices.size(), c_maxCamerasToUse ) );

        // Create and attach all Pylon Devices.
        for (size_t i = 0; i < cameras.GetSize(); ++i)
        {
            cameras[i].Attach( tlFactory.CreateDevice( devices[i] ) );

            // Print the model name of the camera.
            cout << "Using device " << cameras[i].GetDeviceInfo().GetModelName() << endl;
        }

        // Starts grabbing for all cameras starting with index 0. The grabbing
        // is started for one camera after the other. That's why the images of all
        // cameras are not taken at the same time.
        // However, a hardware trigger setup can be used to cause all cameras to grab images synchronously.
        // According to their default configuration, the cameras are
        // set up for free-running continuous acquisition.
        cameras.StartGrabbing();

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Grab c_countOfImagesToGrab from the cameras.
        for (uint32_t i = 0; i < c_countOfImagesToGrab && cameras.IsGrabbing(); ++i)
        {
            cameras.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );

            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
                // When the cameras in the array are created the camera context value
                // is set to the index of the camera in the array.
                // The camera context is a user settable value.
                // This value is attached to each grab result and can be used
                // to determine the camera that produced the grab result.
                intptr_t cameraContextValue = ptrGrabResult->GetCameraContext();

#ifdef PYLON_WIN_BUILD
                // Show the image acquired by each camera in the window related to each camera.
                Pylon::DisplayImage( cameraContextValue, ptrGrabResult );
#endif

                // Print the index and the model name of the camera.
                cout << "Camera " << cameraContextValue << ": " << cameras[cameraContextValue].GetDeviceInfo().GetModelName() << endl;

                // Now, the image data can be processed.
                cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
                cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
                cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
            }
            else
            {
                cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription() << endl;
            }
        }
    }
    catch (const GenericException& e)
    {
        // Error handling
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}
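
The introduction of this sample mentions that grabbing can alternatively be started using the internal grab loop threads of all cameras in the array, which is not shown above. The following is a minimal sketch of that variant; the handler class name CMultiCameraImageHandler and the five-second observation window are illustrative assumptions, not part of the pylon API.

// Minimal sketch: grabbing with the internal grab loop threads of a camera array.
#include <pylon/PylonIncludes.h>

#include <algorithm>
#include <iostream>

using namespace Pylon;

// Illustrative image event handler. It is called by the grab loop threads.
class CMultiCameraImageHandler : public CImageEventHandler
{
public:
    virtual void OnImageGrabbed( CInstantCamera& /*camera*/, const CGrabResultPtr& ptrGrabResult )
    {
        if (ptrGrabResult->GrabSucceeded())
        {
            // The camera context identifies the camera in the array.
            std::cout << "Image from camera " << ptrGrabResult->GetCameraContext()
                << ": " << ptrGrabResult->GetWidth() << "x" << ptrGrabResult->GetHeight() << std::endl;
        }
    }
};

int main( int /*argc*/, char* /*argv*/[] )
{
    int exitCode = 0;
    PylonInitialize();
    try
    {
        CTlFactory& tlFactory = CTlFactory::GetInstance();
        DeviceInfoList_t devices;
        if (tlFactory.EnumerateDevices( devices ) == 0)
        {
            throw RUNTIME_EXCEPTION( "No camera present." );
        }

        CInstantCameraArray cameras( std::min<size_t>( devices.size(), 2 ) );
        for (size_t i = 0; i < cameras.GetSize(); ++i)
        {
            cameras[i].Attach( tlFactory.CreateDevice( devices[i] ) );
            // Ownership of the handler is passed to the camera object (Cleanup_Delete).
            cameras[i].RegisterImageEventHandler( new CMultiCameraImageHandler, RegistrationMode_Append, Cleanup_Delete );
        }

        // Let the cameras' internal grab loop threads call the image event handlers.
        cameras.StartGrabbing( GrabStrategy_OneByOne, GrabLoop_ProvidedByInstantCamera );

        // The handlers run in the grab loop threads; the main thread only waits here.
        WaitObject::Sleep( 5 * 1000 );

        cameras.StopGrabbing();
    }
    catch (const GenericException& e)
    {
        std::cerr << "An exception occurred." << std::endl << e.GetDescription() << std::endl;
        exitCode = 1;
    }
    PylonTerminate();
    return exitCode;
}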

Grab_Strategies#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_Strategies.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample shows the use of the different grab strategies.

    There are different strategies to grab images with the Instant Camera grab engine:
    * One By One: This strategy is the default grab strategy. Acquired images are processed in the order of their arrival.
    * Latest Image Only: Differs from the One By One strategy by using an output queue that holds a single image. Therefore,
    only the latest image is kept in the output queue; all other grabbed images are skipped.
    * Latest Images: Extends the above strategies by making the size of the output queue adjustable. If the output queue has
    a size of 1, it is equal to the Latest Image Only strategy. Consequently, setting the output queue size to
    CInstantCamera::MaxNumBuffer is equal to One By One.
    * Upcoming Image Grab: Ensures that the image grabbed is the next image received from the camera. When retrieving an
    image, a buffer is queued into the input queue and then the call waits for the upcoming image. Subsequently, the image
    data is grabbed into the buffer and returned. Note: This strategy can't be used together with USB camera devices.

*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>

// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;


int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Create an instant camera object for the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Register the standard configuration event handler for enabling software triggering.
        // The software trigger configuration handler replaces the default configuration
        // as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
        camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete );

        // For demonstration purposes only, registers an event handler configuration to print out information about camera use.
        // The event handler configuration is appended to the registered software trigger configuration handler by setting 
        // registration mode to RegistrationMode_Append.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete );
        camera.RegisterImageEventHandler( new CImageEventPrinter, RegistrationMode_Append, Cleanup_Delete );

        // Print the model name of the camera.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;

        // The MaxNumBuffer parameter can be used to control the count of buffers
        // allocated for grabbing. The default value of this parameter is 10.
        camera.MaxNumBuffer = 15;

        // Open the camera.
        camera.Open();


        // Can the camera device be queried whether it is ready to accept the next frame trigger?
        if (camera.CanWaitForFrameTriggerReady())
        {
            cout << "Grab using the GrabStrategy_OneByOne default strategy:" << endl << endl;

            // The GrabStrategy_OneByOne strategy is used. The images are processed
            // in the order of their arrival.
            camera.StartGrabbing( GrabStrategy_OneByOne );

            // In the background, the grab engine thread retrieves the
            // image data and queues the buffers into the internal output queue.

            // Issue software triggers. For each call, wait up to 1000 ms until the camera is ready for triggering the next image.
            for (int i = 0; i < 3; ++i)
            {
                if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException ))
                {
                    camera.ExecuteSoftwareTrigger();
                }
            }

            // For demonstration purposes, wait for the last image to appear in the output queue.
            WaitObject::Sleep( 3 * 1000 );

            // Check that grab results are waiting.
            if (camera.GetGrabResultWaitObject().Wait( 0 ))
            {
                cout << endl << "Grab results wait in the output queue." << endl << endl;
            }

            // All triggered images are still waiting in the output queue
            // and are now retrieved.
            // The grabbing continues in the background, e.g. when using hardware trigger mode,
            // as long as the grab engine does not run out of buffers.
            int nBuffersInQueue = 0;
            while (camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return ))
            {
                nBuffersInQueue++;
            }
            cout << "Retrieved " << nBuffersInQueue << " grab results from output queue." << endl << endl;

            // Stop the grabbing.
            camera.StopGrabbing();



            cout << endl << "Grab using strategy GrabStrategy_LatestImageOnly:" << endl << endl;

            // The GrabStrategy_LatestImageOnly strategy is used. The images are processed
            // in the order of their arrival but only the last received image
            // is kept in the output queue.
            // This strategy can be useful when the acquired images are only displayed on the screen.
            // If the processor has been busy for a while and images could not be displayed automatically
            // the latest image is displayed when processing time is available again.
            camera.StartGrabbing( GrabStrategy_LatestImageOnly );

            // Execute the software trigger, wait actively until the camera accepts the next frame trigger or until the timeout occurs.
            for (int i = 0; i < 3; ++i)
            {
                if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException ))
                {
                    camera.ExecuteSoftwareTrigger();
                }
            }

            // Wait for all images.
            WaitObject::Sleep( 3 * 1000 );

            // Check whether the grab result is waiting.
            if (camera.GetGrabResultWaitObject().Wait( 0 ))
            {
                cout << endl << "A grab result waits in the output queue." << endl << endl;
            }

            // Only the last received image is waiting in the internal output queue
            // and is now retrieved.
            // The grabbing continues in the background, e.g. when using the hardware trigger mode.
            nBuffersInQueue = 0;
            while (camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return ))
            {
                cout << "Skipped " << ptrGrabResult->GetNumberOfSkippedImages() << " images." << endl;
                nBuffersInQueue++;
            }

            cout << "Retrieved " << nBuffersInQueue << " grab result from output queue." << endl << endl;

            // Stop the grabbing.
            camera.StopGrabbing();



            cout << endl << "Grab using strategy GrabStrategy_LatestImages:" << endl << endl;

            // The GrabStrategy_LatestImages strategy is used. The images are processed
            // in the order of their arrival, but only a number of the images received last
            // are kept in the output queue.

            // The size of the output queue can be adjusted.
            // When using this strategy the OutputQueueSize parameter can be changed during grabbing.
            camera.OutputQueueSize = 2;

            camera.StartGrabbing( GrabStrategy_LatestImages );

            // Execute the software trigger, wait actively until the camera accepts the next frame trigger or until the timeout occurs.
            for (int i = 0; i < 3; ++i)
            {
                if (camera.WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException ))
                {
                    camera.ExecuteSoftwareTrigger();
                }
            }

            // Wait for all images.
            WaitObject::Sleep( 3 * 1000 );

            // Check whether the grab results are waiting.
            if (camera.GetGrabResultWaitObject().Wait( 0 ))
            {
                cout << endl << "Grab results wait in the output queue." << endl << endl;
            }

            // Only the images received last are waiting in the internal output queue
            // and are now retrieved.
            // The grabbing continues in the background, e.g. when using the hardware trigger mode.
            nBuffersInQueue = 0;
            while (camera.RetrieveResult( 0, ptrGrabResult, TimeoutHandling_Return ))
            {
                if (ptrGrabResult->GetNumberOfSkippedImages())
                {
                    cout << "Skipped " << ptrGrabResult->GetNumberOfSkippedImages() << " image." << endl;
                }
                nBuffersInQueue++;
            }

            cout << "Retrieved " << nBuffersInQueue << " grab results from output queue." << endl << endl;

            // When setting the output queue size to 1 this strategy is equivalent to the GrabStrategy_LatestImageOnly grab strategy.
            camera.OutputQueueSize = 1;

            // When setting the output queue size to CInstantCamera::MaxNumBuffer this strategy is equivalent to GrabStrategy_OneByOne.
            camera.OutputQueueSize = camera.MaxNumBuffer;

            // Stop the grabbing.
            camera.StopGrabbing();



            // The Upcoming Image grab strategy can't be used together with USB camera devices.
            // For more information, see the advanced topics section of the pylon Programmer's Guide.
            if (!camera.IsUsb())
            {
                cout << endl << "Grab using the GrabStrategy_UpcomingImage strategy:" << endl << endl;

                // Reconfigure the camera to use continuous acquisition.
                CAcquireContinuousConfiguration().OnOpened( camera );

                // The GrabStrategy_UpcomingImage strategy is used. A buffer for grabbing
                // is queued each time when RetrieveResult()
                // is called. The image data is grabbed into the buffer and returned.
                // This ensures that the image grabbed is the next image
                // received from the camera.
                // All images are still transported to the computer.
                camera.StartGrabbing( GrabStrategy_UpcomingImage );

                // Queues a buffer for grabbing and waits for the grab to finish.
                camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );

                // Sleep.
                WaitObject::Sleep( 1000 );

                // Check no grab result is waiting, because no buffers are queued for grabbing.
                if (!camera.GetGrabResultWaitObject().Wait( 0 ))
                {
                    cout << "No grab result waits in the output queue." << endl << endl;
                }

                // Stop the grabbing.
                camera.StopGrabbing();
            }
        }
        else
        {
            // See the documentation of CInstantCamera::CanWaitForFrameTriggerReady() for more information.
            cout << endl;
            cout << "This sample can only be used with cameras that can be queried whether they are ready to accept the next frame trigger.";
            cout << endl;
            cout << endl;
        }
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}
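
As a usage note, GrabStrategy_LatestImageOnly is typically combined with free-running acquisition for live display, as mentioned in the comments above. The following is a minimal sketch of that pattern, assuming the camera's default free-running configuration; the loop count of 100 is an arbitrary illustrative choice.

// Minimal sketch: live display using the Latest Image Only grab strategy.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

#include <iostream>

using namespace Pylon;

int main( int /*argc*/, char* /*argv*/[] )
{
    PylonInitialize();
    try
    {
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Keep only the newest image. Older frames are skipped automatically
        // while the display (or any other slow consumer) is busy.
        camera.StartGrabbing( GrabStrategy_LatestImageOnly );

        CGrabResultPtr ptrGrabResult;
        for (int i = 0; i < 100 && camera.IsGrabbing(); ++i)
        {
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );
            if (ptrGrabResult->GrabSucceeded())
            {
#ifdef PYLON_WIN_BUILD
                Pylon::DisplayImage( 1, ptrGrabResult );
#endif
            }
        }
        camera.StopGrabbing();
    }
    catch (const GenericException& e)
    {
        std::cerr << "An exception occurred." << std::endl << e.GetDescription() << std::endl;
    }
    PylonTerminate();
    return 0;
}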

Grab_UsingActionCommand#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_UsingActionCommand.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample shows how to issue a GigE Vision ACTION_CMD to multiple cameras.
    By using an action command, multiple cameras can be triggered at the same time.
    In contrast, when using software triggering, each camera must be triggered individually.

    To make the configuration of multiple cameras easier, this sample uses the CInstantCameraArray class.
    It also uses a CActionTriggerConfiguration to set up the basic action command features.
*/

#include <time.h>   // for time
#include <stdlib.h> // for rand & srand

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#   include <pylon/PylonGUI.h>
#endif

#include <pylon/BaslerUniversalInstantCameraArray.h>
#include <pylon/Info.h>
#include <pylon/gige/GigETransportLayer.h>
#include <pylon/gige/ActionTriggerConfiguration.h>
#include <pylon/gige/BaslerGigEDeviceInfo.h>


// Namespace for using pylon universal instant camera parameters.
using namespace Basler_UniversalCameraParams;

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Limits the number of cameras used for grabbing.
// It is important to manage the available bandwidth when grabbing with multiple
// cameras. This applies, for instance, if two GigE cameras are connected to the
// same network adapter via a switch. To manage the bandwidth, the GevSCPD
// interpacket delay parameter and the GevSCFTD transmission delay parameter can
// be set for each GigE camera device. The "Controlling Packet Transmission Timing
// with the Interpacket and Frame Transmission Delays on Basler GigE Vision Cameras"
// Application Note (AW000649xx000) provides more information about this topic.
static const uint32_t c_maxCamerasToUse = 2;


int main( int /*argc*/, char* /*argv*/[] )
{
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Get the GigE transport layer.
        // We'll need it later to issue the action commands.
        CTlFactory& tlFactory = CTlFactory::GetInstance();
        IGigETransportLayer* pTL = dynamic_cast<IGigETransportLayer*>(tlFactory.CreateTl( BaslerGigEDeviceClass ));
        if (pTL == NULL)
        {
            throw RUNTIME_EXCEPTION( "No GigE transport layer available." );
        }


        // In this sample we use the transport layer directly to enumerate cameras.
        // By calling EnumerateDevices on the TL, we get only GigE cameras.
        // You could also accomplish this by using a filter and
        // letting the Transport Layer Factory enumerate.
        DeviceInfoList_t allDeviceInfos;
        if (pTL->EnumerateDevices( allDeviceInfos ) == 0)
        {
            throw RUNTIME_EXCEPTION( "No GigE cameras present." );
        }

        // Only use cameras in the same subnet as the first one.
        DeviceInfoList_t usableDeviceInfos;
        usableDeviceInfos.push_back( allDeviceInfos[0] );
        const String_t subnet( allDeviceInfos[0].GetSubnetAddress() );

        // Start with index 1 as we have already added the first one above.
        // We will also limit the number of cameras to c_maxCamerasToUse.
        for (size_t i = 1; i < allDeviceInfos.size() && usableDeviceInfos.size() < c_maxCamerasToUse; ++i)
        {
            if (subnet == allDeviceInfos[i].GetSubnetAddress())
            {
                // Add this deviceInfo to the ones we will be using.
                usableDeviceInfos.push_back( allDeviceInfos[i] );
            }
            else
            {
                cerr << "Camera will not be used because it is in a different subnet "
                    << subnet << "!" << endl;
            }
        }

        // In this sample we'll use a CBaslerUniversalInstantCameraArray to access multiple cameras.
        CBaslerUniversalInstantCameraArray cameras( usableDeviceInfos.size() );

        // Seed the random number generator and generate a random device key value.
        srand( (unsigned) time( NULL ) );
        const uint32_t DeviceKey = rand();

        // For this sample we configure all cameras to be in the same group.
        const uint32_t GroupKey = 0x112233;

        // For the following sample we use the CActionTriggerConfiguration to configure the camera.
        // It will set the DeviceKey, GroupKey and GroupMask features. It will also
        // configure the camera FrameTrigger and set the TriggerSource to the action command.
        // You can look at the implementation of CActionTriggerConfiguration in <pylon/gige/ActionTriggerConfiguration.h>
        // to see which features are set.
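        //
        // Roughly, it does the equivalent of the following for each camera
        // (sketch only; the actual parameter names depend on the camera model,
        // see the header mentioned above for the authoritative implementation):
        //
        //   camera.ActionDeviceKey.SetValue( DeviceKey );
        //   camera.ActionGroupKey.SetValue( GroupKey );
        //   camera.ActionGroupMask.SetValue( AllGroupMask );
        //   camera.TriggerSelector.SetValue( TriggerSelector_FrameStart );
        //   camera.TriggerMode.SetValue( TriggerMode_On );
        //   camera.TriggerSource.SetValue( TriggerSource_Action1 );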

        // Create all GigE cameras and attach them to the InstantCameras in the array.
        for (size_t i = 0; i < cameras.GetSize(); ++i)
        {
            cameras[i].Attach( tlFactory.CreateDevice( usableDeviceInfos[i] ) );
            // We'll use the CActionTriggerConfiguration, which will set up the cameras to wait for an action command.
            cameras[i].RegisterConfiguration( new CActionTriggerConfiguration( DeviceKey, GroupKey, AllGroupMask ), RegistrationMode_Append, Cleanup_Delete );
            // Set the context. This will help us later to correlate the grab result to a camera in the array.
            cameras[i].SetCameraContext( i );

            const CBaslerGigEDeviceInfo& di = cameras[i].GetDeviceInfo();

            // Print the model name of the camera.
            cout << "Using camera " << i << ": " << di.GetModelName() << " (" << di.GetIpAddress() << ")" << endl;
        }

        // Open all cameras.
        // This will apply the CActionTriggerConfiguration specified above.
        cameras.Open();

        //////////////////////////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////
        // Use an Action Command to Trigger Multiple Cameras at the Same Time.
        //////////////////////////////////////////////////////////////////////
        //////////////////////////////////////////////////////////////////////

        cout << endl << "Issuing an action command." << endl;

        // Starts grabbing for all cameras.
        // The cameras won't transmit any image data, because they are configured to wait for an action command.
        cameras.StartGrabbing();

        // Now we issue the action command to all devices in the subnet.
        // The devices with a matching DeviceKey, GroupKey and valid GroupMask will grab an image.
        pTL->IssueActionCommand( DeviceKey, GroupKey, AllGroupMask, subnet );

        // This smart pointer will receive the grab result data.
        CBaslerUniversalGrabResultPtr ptrGrabResult;

        // Retrieve images from all cameras.
        const int DefaultTimeout_ms = 5000;
        for (size_t i = 0; i < usableDeviceInfos.size() && cameras.IsGrabbing(); ++i)
        {
            // CInstantCameraArray::RetrieveResult will return grab results in the order they arrive.
            cameras.RetrieveResult( DefaultTimeout_ms, ptrGrabResult, TimeoutHandling_ThrowException );

            // When the cameras in the array are created the camera context value
            // is set to the index of the camera in the array.
            // The camera context is a user-settable value.
            // This value is attached to each grab result and can be used
            // to determine the camera that produced the grab result.
            intptr_t cameraIndex = ptrGrabResult->GetCameraContext();


            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
#ifdef PYLON_WIN_BUILD
                // Show the image acquired by each camera in the window related to the camera.
                // DisplayImage supports up to 32 image windows.
                if (cameraIndex <= 31)
                    Pylon::DisplayImage( cameraIndex, ptrGrabResult );
#endif
                // Print the index and the model name of the camera.
                cout << "Camera " << cameraIndex << ": " << cameras[cameraIndex].GetDeviceInfo().GetModelName() <<
                    " (" << cameras[cameraIndex].GetDeviceInfo().GetIpAddress() << ")" << endl;

                // You could process the image here by accessing the image buffer.
                cout << "GrabSucceeded: " << ptrGrabResult->GrabSucceeded() << endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                cout << "Gray value of first pixel: " << (uint32_t) pImageBuffer[0] << endl << endl;
            }
            else
            {
                // If a buffer has been incompletely grabbed, the network bandwidth is possibly insufficient for transferring
                // multiple images simultaneously. See note above c_maxCamerasToUse.
                cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription() << endl;
            }
        }

        // If you want to trigger again, wait for the cameras to become
        // trigger-ready before issuing the next action command. To avoid
        // overtriggering, call cameras[0].WaitForFrameTriggerReady()
        // (see the Grab_UsingGrabLoopThread sample for details).
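        //
        // A minimal sketch of repeated triggering, reusing the DeviceKey, GroupKey,
        // and subnet from above:
        //
        //   for (int n = 0; n < 10; ++n)
        //   {
        //       if (cameras[0].WaitForFrameTriggerReady( 1000, TimeoutHandling_ThrowException ))
        //       {
        //           pTL->IssueActionCommand( DeviceKey, GroupKey, AllGroupMask, subnet );
        //           // ... retrieve one grab result per camera as shown above ...
        //       }
        //   }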

        cameras.StopGrabbing();

        // Close all cameras.
        cameras.Close();
    }
    catch (const GenericException& e)
    {
        // Error handling
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_UsingBufferFactory#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_UsingBufferFactory.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample demonstrates how to use a user-provided buffer factory.
    Using a buffer factory is optional and intended for advanced use cases only.
    A buffer factory is only necessary if you want to grab into externally supplied buffers.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#include <pylon/PylonGUI.h>
#endif

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 5;


// A user-provided buffer factory.
class MyBufferFactory : public IBufferFactory
{
public:
    MyBufferFactory()
        : m_lastBufferContext( 1000 )
    {
    }

    virtual ~MyBufferFactory()
    {
    }


    // Will be called when the Instant Camera object needs to allocate a buffer.
    // Return the buffer and context data in the output parameters.
    // In case of an error, new() will throw an exception
    // which will be forwarded to the caller to indicate an error.
    // Warning: This method can be called by different threads.
    virtual void AllocateBuffer( size_t bufferSize, void** pCreatedBuffer, intptr_t& bufferContext )
    {
        try
        {
            // Allocate buffer for pixel data.
            // If you already have a buffer allocated by your image processing library, you can use this instead.
            // In this case, you must modify the delete code (see below) accordingly.
            *pCreatedBuffer = new uint8_t[bufferSize];
            // The context information is never changed by the Instant Camera and can be used
            // by the buffer factory to manage the buffers.
            // The context information can be retrieved from a grab result by calling
            // ptrGrabResult->GetBufferContext();
            bufferContext = ++m_lastBufferContext;

            cout << "Created buffer " << bufferContext << ", " << *pCreatedBuffer << endl;
        }
        catch (const std::exception&)
        {
            // In case of an error you must free the memory you may have already allocated.
            if (*pCreatedBuffer != NULL)
            {
                uint8_t* p = reinterpret_cast<uint8_t*>(*pCreatedBuffer);
                delete[] p;
                *pCreatedBuffer = NULL;
            }

            // Rethrow exception.
            // AllocateBuffer can also just return with *pCreatedBuffer = NULL to indicate
            // that no buffer is available at the moment.
            throw;
        }
    }


    // Frees a previously allocated buffer.
    // Warning: This method can be called by different threads.
    virtual void FreeBuffer( void* pCreatedBuffer, intptr_t bufferContext )
    {
        uint8_t* p = reinterpret_cast<uint8_t*>(pCreatedBuffer);
        delete[] p;
        cout << "Freed buffer " << bufferContext << ", " << pCreatedBuffer << endl;
    }


    // Destroys the buffer factory.
    // This will be used when you pass the ownership of the buffer factory instance to pylon
    // by defining Cleanup_Delete. pylon will call this function to destroy the instance
    // of the buffer factory. If you don't pass the ownership to pylon (Cleanup_None),
    // this method will be ignored.
    virtual void DestroyBufferFactory()
    {
        delete this;
    }


protected:

    unsigned long m_lastBufferContext;
};


int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // The buffer factory must be created first because objects on the
        // stack are destroyed in reverse order of creation.
        // The buffer factory must exist longer than the Instant Camera object
        // in this sample.
        MyBufferFactory myFactory;

        // Create an instant camera object with the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Print the model name of the camera.
        cout << "Using device " << camera.GetDeviceInfo().GetModelName() << endl;

        // Use our own implementation of a buffer factory.
        // Since we control the lifetime of the factory object, we pass Cleanup_None.
        camera.SetBufferFactory( &myFactory, Cleanup_None );

        // The parameter MaxNumBuffer can be used to control the count of buffers
        // allocated for grabbing. The default value of this parameter is 10.
        camera.MaxNumBuffer = 5;

        // If the 'BufferHandlingMode_Stream' is used, make sure to set
        // camera.MaxNumQueuedBuffer to a value smaller than or equal to the value
        // of camera.MaxNumBuffer.
        // Note: The USB3 Vision and GenTL transport layers do not support the
        // 'BufferHandlingMode_Stream' mode.
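        //
        // A minimal sketch (assumption: your pylon version exposes the buffer
        // handling mode as an instant camera parameter; otherwise this note does
        // not apply to your setup):
        //
        // camera.BufferHandlingMode.TrySetValue( BufferHandlingMode_Stream );
        // camera.MaxNumQueuedBuffer = 3;   // Must be <= camera.MaxNumBuffer (5 here).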

        // Start the grabbing of c_countOfImagesToGrab images.
        // The camera device is parameterized with a default configuration which
        // sets up free-running continuous acquisition.
        camera.StartGrabbing( c_countOfImagesToGrab );

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved.
        while (camera.IsGrabbing())
        {
            // Wait for an image and then retrieve it. A timeout of 5000 ms is used.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );

            // Image grabbed successfully?
            if (ptrGrabResult->GrabSucceeded())
            {
                // Access the image data.
                cout << "Context: " << ptrGrabResult->GetBufferContext() << endl;
                cout << "SizeX: " << ptrGrabResult->GetWidth() << endl;
                cout << "SizeY: " << ptrGrabResult->GetHeight() << endl;
                const uint8_t* pImageBuffer = (uint8_t*) ptrGrabResult->GetBuffer();
                cout << "First value of pixel data: " << (uint32_t) pImageBuffer[0] << endl << endl;

#ifdef PYLON_WIN_BUILD
                // Display the grabbed image.
                Pylon::DisplayImage( 1, ptrGrabResult );
#endif
            }
            else
            {
                cout << "Error: " << std::hex << ptrGrabResult->GetErrorCode() << std::dec << " " << ptrGrabResult->GetErrorDescription();
            }
        }
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}
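
The comment in AllocateBuffer() above notes that buffers already allocated by an image processing library can be handed out instead of allocating on demand. The following is a minimal sketch of such a pool-based variant; the class name MyPoolBufferFactory and the pool layout are illustrative assumptions, not part of the pylon API. A real implementation would also guard AllocateBuffer() and FreeBuffer() with a mutex, because pylon can call them from different threads (see the warnings in the sample above).

// Minimal sketch: a buffer factory serving grab buffers from a preallocated pool.
#include <pylon/PylonIncludes.h>

#include <vector>

using namespace Pylon;

class MyPoolBufferFactory : public IBufferFactory
{
public:
    MyPoolBufferFactory( size_t bufferSize, size_t bufferCount )
        : m_buffers( bufferCount, std::vector<uint8_t>( bufferSize ) )
        , m_inUse( bufferCount, false )
    {
    }

    virtual void AllocateBuffer( size_t bufferSize, void** pCreatedBuffer, intptr_t& bufferContext )
    {
        *pCreatedBuffer = NULL;
        for (size_t i = 0; i < m_buffers.size(); ++i)
        {
            if (!m_inUse[i] && m_buffers[i].size() >= bufferSize)
            {
                m_inUse[i] = true;
                *pCreatedBuffer = m_buffers[i].data();
                // Use the pool index as the buffer context so that FreeBuffer()
                // can find the buffer again.
                bufferContext = static_cast<intptr_t>(i);
                return;
            }
        }
        // Leaving *pCreatedBuffer set to NULL tells pylon that no buffer is
        // available at the moment.
    }

    virtual void FreeBuffer( void* /*pCreatedBuffer*/, intptr_t bufferContext )
    {
        // The memory itself stays in the pool; just mark the buffer as free.
        m_inUse[static_cast<size_t>(bufferContext)] = false;
    }

    virtual void DestroyBufferFactory()
    {
        // Intended to live on the stack and be passed with Cleanup_None,
        // so there is nothing to do here.
    }

private:
    std::vector<std::vector<uint8_t> > m_buffers;
    std::vector<bool> m_inUse;
};

The factory would be attached exactly as in the sample above, e.g. camera.SetBufferFactory( &myPoolFactory, Cleanup_None ), with the pool dimensioned to at least camera.MaxNumBuffer buffers of the largest expected payload size.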

Grab_UsingExposureEndEvent#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_UsingExposureEndEvent.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample shows how to use the Exposure End event to speed up image acquisition.
    For example, when a sensor exposure is finished, the camera can send an Exposure End event to the computer.
    The computer can receive the event before the image data of the finished exposure has been transferred completely.
    This avoids unnecessary delays, e.g., when an image object moves before the related image data transfer is complete.

    Note: This sample shows how to match incoming images using the camera.EventExposureEndFrameID
          and the ptrGrabResult->GetBlockID() values. For ace 2 camera models,
          camera.EventExposureEndFrameID and ptrGrabResult->GetBlockID() don't contain matching values.
          The ptrGrabResult->GetBlockID() equivalent is the chunk value represented by the camera.ChunkSelector FrameID.
          Please see the Grab_ChunkImage sample for more information about how to determine the
          correct chunk value to use instead of ptrGrabResult->GetBlockID().
*/
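
// A minimal sketch of the chunk-based matching mentioned in the note above
// (assumptions: an ace 2 camera model with chunk features available; see the
// Grab_ChunkImage sample for the complete handling):
//
//   camera.ChunkModeActive.TrySetValue( true );
//   if (camera.ChunkSelector.TrySetValue( ChunkSelector_FrameID ))
//   {
//       camera.ChunkEnable.SetValue( true );
//   }
//   // Later, instead of ptrGrabResult->GetBlockID():
//   int64_t frameId = ptrGrabResult->ChunkFrameID.GetValue();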

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>

// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"

#include <iomanip>

#ifdef PYLON_UNIX_BUILD
#    include <sys/time.h>
#endif

// Include file to use pylon universal instant camera parameters.
#include <pylon/BaslerUniversalInstantCamera.h>

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using pylon universal instant camera parameters.
using namespace Basler_UniversalCameraParams;

// Namespace for using cout.
using namespace std;

// Enumeration used for distinguishing different events.
enum MyEvents
{
    eMyExposureEndEvent,      // Triggered by a camera event.
    eMyImageReceivedEvent,    // Triggered by the receipt of an image.
    eMyMoveEvent,             // Triggered when the imaged item or the sensor head can be moved.
    eMyNoEvent                // Used as default setting.
};

// Names of possible events for printed output.
const char* MyEventNames[] =
{
    "ExposureEndEvent     ",
    "ImageReceived        ",
    "Move                 ",
    "NoEvent              "
};

// Used for logging received events without outputting the information on the screen
// because outputting will change the timing.
// This class is used for demonstration purposes only.
struct LogItem
{
    LogItem()
        : eventType( eMyNoEvent )
        , frameNumber( 0 )
    {
    }

    LogItem( MyEvents event, uint16_t frameNr )
        : eventType( event )
        , frameNumber( frameNr )
    {
        // Warning: The values measured may not be correct on older computer hardware.
#if defined(PYLON_WIN_BUILD)
        QueryPerformanceCounter( &time );
#elif defined(PYLON_UNIX_BUILD)
        struct timeval tv;

        gettimeofday( &tv, NULL );
        time = static_cast<unsigned long long>(tv.tv_sec) * 1000L + static_cast<unsigned long long>(tv.tv_usec) / 1000LL;
#endif
    }


#if defined(PYLON_WIN_BUILD)
    LARGE_INTEGER time; // Timestamps recorded.
#elif defined(PYLON_UNIX_BUILD)
    unsigned long long time; // Timestamps recorded.
#endif
    MyEvents eventType; // Type of the event received.
    uint16_t frameNumber; // Frame number of the event received.
};


// Helper function for printing a log.
// This function is used for demonstration purposes only.
void PrintLog( const std::vector<LogItem>& aLog )
{
#if defined(PYLON_WIN_BUILD)
    // Get the computer timer frequency.
    LARGE_INTEGER timerFrequency;
    QueryPerformanceFrequency( &timerFrequency );
#endif

    cout << std::endl << "Warning. The time values printed may not be correct on older computer hardware." << std::endl << std::endl;
    // Print the event information header.
    cout << "Time [ms]    " << "Event                 " << "Frame Number" << std::endl;
    cout << "------------ " << "--------------------- " << "-----------" << std::endl;

    // Print the logged information.
    size_t logSize = aLog.size();
    for (size_t i = 0; i < logSize; ++i)
    {
        // Calculate the time elapsed between the events.
        double time_ms = 0;
        if (i)
        {
#if defined(PYLON_WIN_BUILD)
            __int64 oldTicks = ((__int64) aLog[i - 1].time.HighPart << 32) + (__int64) aLog[i - 1].time.LowPart;
            __int64 newTicks = ((__int64) aLog[i].time.HighPart << 32) + (__int64) aLog[i].time.LowPart;
            long double timeDifference = (long double) (newTicks - oldTicks);
            long double ticksPerSecond = (long double) (((__int64) timerFrequency.HighPart << 32) + (__int64) timerFrequency.LowPart);
            time_ms = (timeDifference / ticksPerSecond) * 1000;
#elif defined(PYLON_UNIX_BUILD)
            time_ms = aLog[i].time - aLog[i - 1].time;
#endif
        }

        // Print the event information.
        cout << setw( 12 ) << fixed << setprecision( 4 ) << time_ms << " " << MyEventNames[aLog[i].eventType] << " " << aLog[i].frameNumber << std::endl;
    }
}


// Number of images to be grabbed.
static const uint32_t c_countOfImagesToGrab = 20;


// Example handler for GigE camera events.
// Additional handling is required for GigE camera events because the event network packets may get lost, duplicated, or delayed in the network.
class CEventHandler : public CBaslerUniversalCameraEventHandler, public CBaslerUniversalImageEventHandler
{
public:
    CEventHandler()
        : m_nextExpectedFrameNumberImage( 0 )
        , m_nextExpectedFrameNumberExposureEnd( 0 )
        , m_nextFrameNumberForMove( 0 )
        , m_isGigE( false )
    {
        // Reserve space to log camera, image, and move events.
        m_log.reserve( c_countOfImagesToGrab * 3 );
    }

    void Initialize( int value, bool isGigE )
    {
        m_nextExpectedFrameNumberImage = value;
        m_nextExpectedFrameNumberExposureEnd = value;
        m_nextFrameNumberForMove = value;
        m_isGigE = isGigE;
    }

    // This method is called when a camera event has been received.
    virtual void OnCameraEvent( CBaslerUniversalInstantCamera& camera, intptr_t userProvidedId, GenApi::INode* /* pNode */ )
    {
        if (userProvidedId == eMyExposureEndEvent)
        {
            // An Exposure End event has been received.
            uint16_t frameNumber;
            if (camera.GetSfncVersion() < Sfnc_2_0_0)
            {
                frameNumber = (uint16_t) camera.ExposureEndEventFrameID.GetValue();
            }
            else
            {
                frameNumber = (uint16_t) camera.EventExposureEndFrameID.GetValue();
            }
            m_log.push_back( LogItem( eMyExposureEndEvent, frameNumber ) );

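            // GigE event messages may be duplicated in the network (see the note
            // above this class). If the incremented frame number equals the next
            // expected Exposure End frame number, this event is a repetition of
            // the previous one and the checks below are skipped.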
            if (GetIncrementedFrameNumber( frameNumber ) != m_nextExpectedFrameNumberExposureEnd)
            {
                // Check whether the imaged item or the sensor head can be moved.
                if (frameNumber == m_nextFrameNumberForMove)
                {
                    MoveImagedItemOrSensorHead();
                }

                // Check for missing Exposure End events.
                if (frameNumber != m_nextExpectedFrameNumberExposureEnd)
                {
                    throw RUNTIME_EXCEPTION( "An Exposure End event has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberExposureEnd, frameNumber );
                }
                IncrementFrameNumber( m_nextExpectedFrameNumberExposureEnd );
            }
        }
        else
        {
            PYLON_ASSERT2( false, "The sample has been modified and a new event has been registered. Add handler code above." );
        }
    }

    // This method is called when an image has been grabbed.
    virtual void OnImageGrabbed( CBaslerUniversalInstantCamera& /*camera*/, const CBaslerUniversalGrabResultPtr& ptrGrabResult )
    {
        // An image has been received.
        uint16_t frameNumber = (uint16_t) ptrGrabResult->GetBlockID();
        m_log.push_back( LogItem( eMyImageReceivedEvent, frameNumber ) );

        // Check whether the imaged item or the sensor head can be moved.
        // This will be the case if the Exposure End has been lost or if the Exposure End is received later than the image.
        if (frameNumber == m_nextFrameNumberForMove)
        {
            MoveImagedItemOrSensorHead();
        }

        // Check for missing images.
        if (frameNumber != m_nextExpectedFrameNumberImage)
        {
            throw RUNTIME_EXCEPTION( "An image has been lost. Expected frame number is %d but got frame number %d.", m_nextExpectedFrameNumberImage, frameNumber );
        }
        IncrementFrameNumber( m_nextExpectedFrameNumberImage );
    }

    void MoveImagedItemOrSensorHead()
    {
        // The imaged item or the sensor head can be moved now...
        // The camera may not be ready yet for a trigger at this point because the sensor is still being read out.
        // See the documentation of the CInstantCamera::WaitForFrameTriggerReady() method for more information.
        m_log.push_back( LogItem( eMyMoveEvent, m_nextFrameNumberForMove ) );
        IncrementFrameNumber( m_nextFrameNumberForMove );
    }

    void PrintLog()
    {
        ::PrintLog( m_log );
    }

private:
    void IncrementFrameNumber( uint16_t& frameNumber )
    {
        frameNumber = GetIncrementedFrameNumber( frameNumber );
    }

    uint16_t GetIncrementedFrameNumber( uint16_t frameNumber )
    {
        ++frameNumber;

        if (m_isGigE)
        {
            if (frameNumber == 0)
            {
                // Zero is not a valid frame number.
                ++frameNumber;
            }
        }


        return frameNumber;
    }

    uint16_t m_nextExpectedFrameNumberImage;
    uint16_t m_nextExpectedFrameNumberExposureEnd;
    uint16_t m_nextFrameNumberForMove;

    bool m_isGigE;

    std::vector<LogItem> m_log;
};



int main( int /*argc*/, char* /*argv*/[] )
{
    // Exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Create the event handler.
        CEventHandler eventHandler;

        // Create an instant camera object with the first camera device found.
        CBaslerUniversalInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Camera models behave differently regarding IDs and counters. Set initial values.
        if (camera.IsGigE())
        {
            eventHandler.Initialize( 1, true );
        }
        else
        {
            eventHandler.Initialize( 0, false );
        }


        // For demonstration purposes only, add a sample configuration event handler to print information
        // about camera use.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete ); // Camera use.

        // Register the event handler for image events. The same handler is registered for camera events below.
        camera.RegisterImageEventHandler( &eventHandler, RegistrationMode_Append, Cleanup_None );

        // Camera event processing must be enabled first. The default is off.
        camera.GrabCameraEvents = true;

        // Open the camera to configure parameters.
        camera.Open();

        // Check whether the device supports events.
        if (!camera.EventSelector.IsWritable())
        {
            throw RUNTIME_EXCEPTION( "The device doesn't support events." );
        }

        if (camera.GetSfncVersion() < Sfnc_2_0_0)
        {
            camera.RegisterCameraEventHandler( &eventHandler, "ExposureEndEventData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None );
        }
        else
        {
            camera.RegisterCameraEventHandler( &eventHandler, "EventExposureEndData", eMyExposureEndEvent, RegistrationMode_ReplaceAll, Cleanup_None );
        }


        // Enable the sending of Exposure End events.
        // Select the event to be received.
        if (camera.EventSelector.TrySetValue( EventSelector_ExposureEnd ))
        {   // Enable it.
            if (!camera.EventNotification.TrySetValue( EventNotification_On ))
            {
                // scout-f, scout-g, and aviator GigE cameras use a different value.
                camera.EventNotification.SetValue( EventNotification_GenICamEvent );
            }
        }


        // Start grabbing of c_countOfImagesToGrab images.
        // The camera device is operated in a default configuration that
        // sets up free-running continuous acquisition.
        camera.StartGrabbing( c_countOfImagesToGrab );

        // This smart pointer will receive the grab result data.
        CGrabResultPtr ptrGrabResult;

        // Camera.StopGrabbing() is called automatically by the RetrieveResult() method
        // when c_countOfImagesToGrab images have been retrieved.
        while (camera.IsGrabbing())
        {
            // Retrieve grab results and notify the camera event and image event handlers.
            camera.RetrieveResult( 5000, ptrGrabResult, TimeoutHandling_ThrowException );
            // Nothing to do here with the grab result. The grab results are handled by the registered event handlers.
        }

        // Disable the sending of Exposure End events.
        if (camera.EventSelector.TrySetValue( EventSelector_ExposureEnd ))
        {
            camera.EventNotification.SetValue( EventNotification_Off );
        }

        // Disable the sending of Frame Start Overtrigger events.
        if (camera.EventSelector.TrySetValue( EventSelector_FrameStartOvertrigger ))
        {
            camera.EventNotification.SetValue( EventNotification_Off );
        }

        // Disable the sending of Event Overrun events.
        if (camera.EventSelector.TrySetValue( EventSelector_EventOverrun ))
        {
            camera.EventNotification.SetValue( EventNotification_Off );
        }

        // Print the recorded log showing the timing of events and images.
        eventHandler.PrintLog();
    }
    catch (const GenericException& e)
    {
        // Error handling.
        cerr << "An exception occurred." << endl
            << e.GetDescription() << endl;
        exitCode = 1;
    }

    // Comment the following two lines to disable waiting on exit.
    cerr << endl << "Press enter to exit." << endl;
    while (cin.get() != '\n');

    // Releases all pylon resources.
    PylonTerminate();

    return exitCode;
}

Grab_UsingGrabLoopThread#

This sample is part of the pylon sample solution that can be found under <⁠SDK ROOT>\Development\Samples\C++.

// Grab_UsingGrabLoopThread.cpp
/*
    Note: Before getting started, Basler recommends reading the "Programmer's Guide" topic
    in the pylon C++ API documentation delivered with pylon.
    If you are upgrading to a higher major version of pylon, Basler also
    strongly recommends reading the "Migrating from Previous Versions" topic in the pylon C++ API documentation.

    This sample illustrates how to grab and process images using the grab loop thread
    provided by the Instant Camera class.
*/

// Include files to use the pylon API.
#include <pylon/PylonIncludes.h>
#ifdef PYLON_WIN_BUILD
#    include <pylon/PylonGUI.h>
#endif

// Include files used by samples.
#include "../include/ConfigurationEventPrinter.h"
#include "../include/ImageEventPrinter.h"

// Namespace for using pylon objects.
using namespace Pylon;

// Namespace for using cout.
using namespace std;

// Example of an image event handler.
class CSampleImageEventHandler : public CImageEventHandler
{
public:
    virtual void OnImageGrabbed( CInstantCamera& /*camera*/, const CGrabResultPtr& ptrGrabResult )
    {
        cout << "CSampleImageEventHandler::OnImageGrabbed called." << std::endl;

#ifdef PYLON_WIN_BUILD
        // Display the image
        Pylon::DisplayImage( 1, ptrGrabResult );
#endif
    }
};

int main( int /*argc*/, char* /*argv*/[] )
{
    // The exit code of the sample application.
    int exitCode = 0;

    // Before using any pylon methods, the pylon runtime must be initialized.
    PylonInitialize();

    try
    {
        // Create an instant camera object for the camera device found first.
        CInstantCamera camera( CTlFactory::GetInstance().CreateFirstDevice() );

        // Register the standard configuration event handler for enabling software triggering.
        // The software trigger configuration handler replaces the default configuration
        // as all currently registered configuration handlers are removed by setting the registration mode to RegistrationMode_ReplaceAll.
        camera.RegisterConfiguration( new CSoftwareTriggerConfiguration, RegistrationMode_ReplaceAll, Cleanup_Delete );

        // For demonstration purposes only, registers an event handler configuration to print out information about camera use.
        // The event handler configuration is appended to the registered software trigger configuration handler by setting 
        // registration mode to RegistrationMode_Append.
        camera.RegisterConfiguration( new CConfigurationEventPrinter, RegistrationMode_Append, Cleanup_Delete );

        // The image event printer serves as sample image processing.