new file: i/IOTK/IOTK-1.2.2-intel-2021a.eb

new file:   i/IRODS/IRODS-0.7.3.eb
	new file:   l/libxc/libxc-5.1.5-intel-2020a.eb
	new file:   n/NodeODM/NodeODM-2.2.0-GCCcore-11.3.0.eb
	new file:   o/ODM/ODM-2.8.7/.dockerignore
	new file:   o/ODM/ODM-2.8.7/.github/workflows/publish-docker-and-wsl.yaml
	new file:   o/ODM/ODM-2.8.7/.github/workflows/publish-docker-gpu.yaml
	new file:   o/ODM/ODM-2.8.7/.github/workflows/publish-snap.yml
	new file:   o/ODM/ODM-2.8.7/.github/workflows/publish-windows.yml
	new file:   o/ODM/ODM-2.8.7/.github/workflows/test-build-prs.yaml
	new file:   o/ODM/ODM-2.8.7/.gitignore
	new file:   o/ODM/ODM-2.8.7/.gitmodules
	new file:   o/ODM/ODM-2.8.7/CNAME
	new file:   o/ODM/ODM-2.8.7/CONTRIBUTING.md
	new file:   o/ODM/ODM-2.8.7/Dockerfile
	new file:   o/ODM/ODM-2.8.7/LICENSE
	new file:   o/ODM/ODM-2.8.7/README.md
	new file:   o/ODM/ODM-2.8.7/SuperBuild/CMakeLists.txt
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-Ceres.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-Entwine.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-FPCFilter.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-GFlags.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-Hexer.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-LASzip.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-MvsTexturing.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-Obj2Tiles.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-OpenCV.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-OpenMVS.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-OpenSfM.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-PCL.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-PDAL.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-PyPopsift.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/External-Untwine.cmake
	new file:   o/ODM/ODM-2.8.7/SuperBuild/cmake/ExternalProject-Setup.cmake
	new file:   o/ODM/ODM-2.8.7/VERSION
	new file:   o/ODM/ODM-2.8.7/code_of_conduct.md
	new file:   o/ODM/ODM-2.8.7/configure.py
	new file:   o/ODM/ODM-2.8.7/configure.sh
	new file:   o/ODM/ODM-2.8.7/console.bat
	new file:   o/ODM/ODM-2.8.7/contrib/blender/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/blender/common.py
	new file:   o/ODM/ODM-2.8.7/contrib/blender/odm_photo.py
	new file:   o/ODM/ODM-2.8.7/contrib/blender/odm_video.py
	new file:   o/ODM/ODM-2.8.7/contrib/blender/photo_360.blend
	new file:   o/ODM/ODM-2.8.7/contrib/blender/photo_vr.blend
	new file:   o/ODM/ODM-2.8.7/contrib/grass/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/grass/odm_grass.py
	new file:   o/ODM/ODM-2.8.7/contrib/mergepreview/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/mergepreview/mergepreview.py
	new file:   o/ODM/ODM-2.8.7/contrib/ndvi/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/ndvi/agricultural_indices.py
	new file:   o/ODM/ODM-2.8.7/contrib/ndvi/ndvi.py
	new file:   o/ODM/ODM-2.8.7/contrib/ndvi/rename_sentera_agx710_multispectral_tif.py
	new file:   o/ODM/ODM-2.8.7/contrib/orthorectify/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/orthorectify/orthorectify.py
	new file:   o/ODM/ODM-2.8.7/contrib/orthorectify/run.sh
	new file:   o/ODM/ODM-2.8.7/contrib/pc2dem/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/pc2dem/pc2dem.py
	new file:   o/ODM/ODM-2.8.7/contrib/resize/README.md
	new file:   o/ODM/ODM-2.8.7/contrib/resize/requirements.txt
	new file:   o/ODM/ODM-2.8.7/contrib/resize/resize.py
	new file:   o/ODM/ODM-2.8.7/contrib/shell/odm_exif2utm.sh
	new file:   o/ODM/ODM-2.8.7/contrib/visveg/readme.md
	new file:   o/ODM/ODM-2.8.7/contrib/visveg/vegind.py
	new file:   o/ODM/ODM-2.8.7/docker/README
	new file:   o/ODM/ODM-2.8.7/docker/g++
	new file:   o/ODM/ODM-2.8.7/docker/gcc
	new file:   o/ODM/ODM-2.8.7/docs/issue_template.md
	new file:   o/ODM/ODM-2.8.7/gpu.Dockerfile
	new file:   o/ODM/ODM-2.8.7/innosetup.iss
	new file:   o/ODM/ODM-2.8.7/licenses/libext_copyright.txt
	new file:   o/ODM/ODM-2.8.7/licenses/libx11_copyright.txt
	new file:   o/ODM/ODM-2.8.7/licenses/license.md
	new file:   o/ODM/ODM-2.8.7/odm_docker_readme.txt
	new file:   o/ODM/ODM-2.8.7/opendm/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/boundary.py
	new file:   o/ODM/ODM-2.8.7/opendm/camera.py
	new file:   o/ODM/ODM-2.8.7/opendm/cogeo.py
	new file:   o/ODM/ODM-2.8.7/opendm/concurrency.py
	new file:   o/ODM/ODM-2.8.7/opendm/config.py
	new file:   o/ODM/ODM-2.8.7/opendm/context.py
	new file:   o/ODM/ODM-2.8.7/opendm/cropper.py
	new file:   o/ODM/ODM-2.8.7/opendm/cutline.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/commands.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/bounds/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/bounds/types.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/bounds/utils.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/extra_dimensions/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/extra_dimensions/dimension.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/extra_dimensions/distance_dimension.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/extra_dimensions/extended_dimension.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/extra_dimensions/partition_dimension.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/grid/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/grid/builder.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/io/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/io/las_io.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/one_partition.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/partition_plan.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/quad_partitions.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/selector.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/partition/surrounding_partitions.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/point_cloud.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/ground_rectification/rectify.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/merge.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/pdal.py
	new file:   o/ODM/ODM-2.8.7/opendm/dem/utils.py
	new file:   o/ODM/ODM-2.8.7/opendm/dls.py
	new file:   o/ODM/ODM-2.8.7/opendm/entwine.py
	new file:   o/ODM/ODM-2.8.7/opendm/gcp.py
	new file:   o/ODM/ODM-2.8.7/opendm/geo.py
	new file:   o/ODM/ODM-2.8.7/opendm/get_image_size.py
	new file:   o/ODM/ODM-2.8.7/opendm/gpu.py
	new file:   o/ODM/ODM-2.8.7/opendm/gsd.py
	new file:   o/ODM/ODM-2.8.7/opendm/io.py
	new file:   o/ODM/ODM-2.8.7/opendm/location.py
	new file:   o/ODM/ODM-2.8.7/opendm/log.py
	new file:   o/ODM/ODM-2.8.7/opendm/loghelpers.py
	new file:   o/ODM/ODM-2.8.7/opendm/mesh.py
	new file:   o/ODM/ODM-2.8.7/opendm/multispectral.py
	new file:   o/ODM/ODM-2.8.7/opendm/nvm.py
	new file:   o/ODM/ODM-2.8.7/opendm/ogctiles.py
	new file:   o/ODM/ODM-2.8.7/opendm/orthophoto.py
	new file:   o/ODM/ODM-2.8.7/opendm/osfm.py
	new file:   o/ODM/ODM-2.8.7/opendm/photo.py
	new file:   o/ODM/ODM-2.8.7/opendm/point_cloud.py
	new file:   o/ODM/ODM-2.8.7/opendm/progress.py
	new file:   o/ODM/ODM-2.8.7/opendm/pseudogeo.py
	new file:   o/ODM/ODM-2.8.7/opendm/remote.py
	new file:   o/ODM/ODM-2.8.7/opendm/report/dsm_gradient.png
	new file:   o/ODM/ODM-2.8.7/opendm/report/overlap_color_map.txt
	new file:   o/ODM/ODM-2.8.7/opendm/report/overlap_diagram_legend.png
	new file:   o/ODM/ODM-2.8.7/opendm/rollingshutter.py
	new file:   o/ODM/ODM-2.8.7/opendm/shots.py
	new file:   o/ODM/ODM-2.8.7/opendm/system.py
	new file:   o/ODM/ODM-2.8.7/opendm/thermal.py
	new file:   o/ODM/ODM-2.8.7/opendm/thermal_tools/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/thermal_tools/dji_unpack.py
	new file:   o/ODM/ODM-2.8.7/opendm/thermal_tools/flir_unpack.py
	new file:   o/ODM/ODM-2.8.7/opendm/thermal_tools/thermal_utils.py
	new file:   o/ODM/ODM-2.8.7/opendm/tiles/color_relief.txt
	new file:   o/ODM/ODM-2.8.7/opendm/tiles/gdal2tiles.py
	new file:   o/ODM/ODM-2.8.7/opendm/tiles/hsv_merge.py
	new file:   o/ODM/ODM-2.8.7/opendm/tiles/tiler.py
	new file:   o/ODM/ODM-2.8.7/opendm/types.py
	new file:   o/ODM/ODM-2.8.7/opendm/utils.py
	new file:   o/ODM/ODM-2.8.7/opendm/vendor/__init__.py
	new file:   o/ODM/ODM-2.8.7/opendm/vendor/gdal_fillnodata.py
	new file:   o/ODM/ODM-2.8.7/portable.Dockerfile
	new file:   o/ODM/ODM-2.8.7/requirements.txt
	new file:   o/ODM/ODM-2.8.7/run.bat
	new file:   o/ODM/ODM-2.8.7/run.py
	new file:   o/ODM/ODM-2.8.7/run.sh
	new file:   o/ODM/ODM-2.8.7/snap/snapcraft.yaml
	new file:   o/ODM/ODM-2.8.7/snap/snapcraft21.yaml
	new file:   o/ODM/ODM-2.8.7/stages/__init__.py
	new file:   o/ODM/ODM-2.8.7/stages/dataset.py
	new file:   o/ODM/ODM-2.8.7/stages/mvstex.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_app.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_dem.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_filterpoints.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_georeferencing.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_meshing.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_orthophoto.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_postprocess.py
	new file:   o/ODM/ODM-2.8.7/stages/odm_report.py
	new file:   o/ODM/ODM-2.8.7/stages/openmvs.py
	new file:   o/ODM/ODM-2.8.7/stages/run_opensfm.py
	new file:   o/ODM/ODM-2.8.7/stages/splitmerge.py
	new file:   o/ODM/ODM-2.8.7/start-dev-env.sh
	new file:   o/ODM/ODM-2.8.7/test.sh
	new file:   o/ODM/ODM-2.8.7/tests/assets/.gitignore
	new file:   o/ODM/ODM-2.8.7/tests/assets/gcp_extras.txt
	new file:   o/ODM/ODM-2.8.7/tests/assets/gcp_latlon_south.txt
	new file:   o/ODM/ODM-2.8.7/tests/assets/gcp_latlon_valid.txt
	new file:   o/ODM/ODM-2.8.7/tests/assets/gcp_michigan_feet_valid.txt
	new file:   o/ODM/ODM-2.8.7/tests/assets/gcp_utm_north_valid.txt
	new file:   o/ODM/ODM-2.8.7/tests/assets/images/DJI_0002.JPG
	new file:   o/ODM/ODM-2.8.7/tests/assets/reconstruction.json
	new file:   o/ODM/ODM-2.8.7/tests/assets/sample.json
	new file:   o/ODM/ODM-2.8.7/tests/test_camera.py
	new file:   o/ODM/ODM-2.8.7/tests/test_gcp.py
	new file:   o/ODM/ODM-2.8.7/tests/test_osfm.py
	new file:   o/ODM/ODM-2.8.7/tests/test_remote.py
	new file:   o/ODM/ODM-2.8.7/tests/test_types.py
	new file:   o/ODM/ODM-2.8.7/vcpkg-requirements.txt
	new file:   o/ODM/ODM-2.8.7/win32env.bat
	new file:   o/ODM/ODM-2.8.7/winrun.bat
	new file:   o/ODM/v2.8.7.tar.gz
	new file:   y/Yambo/Yambo-5.1.0-intel-2020a.eb
	new file:   y/Yambo/Yambo-5.1.0-intel-2020a.eb.bak_20220715124556_61837
	new file:   y/Yambo/Yambo-5.1.0-intel-2021a.eb.old
	deleted:    y/Yambo/Yambo-5.1.0-intel-2021a.eb
This commit is contained in:
Jakub Kropacek 2022-07-19 15:02:22 +02:00 committed by easybuild
parent 43ddb383a1
commit 9b1342122b
194 changed files with 22745 additions and 21 deletions

View File

@ -0,0 +1,41 @@
# IT4Innovations 2022
# LK JK
easyblock = 'MakeCp'

name = 'IOTK'
version = '1.2.2'

homepage = 'https://github.com/QEF/iotk/tree/master/iotk'
description = """The input/output tool kit (IOTK) is a Fortran90 library intended to provide a
simplified access to tagged files formatted using some specific rule."""

toolchain = {'name': 'intel', 'version': '2021a'}

# Sources come from the Yambo-maintained fork of IOTK, published as a
# file attachment on the yambo-code GitHub repository.
source_urls = ['https://github.com/yambo-code/yambo/files/962173']
sources = ['iotk-y1.2.2.tar.gz']
patches = ['IOTK-intel-2020a.patch']
checksums = [
    'c0a4eb19f3e885d83d7afa52eb90658fba7cb1cb6e66049866a98dcc980de543',  # iotk-y1.2.2.tar.gz
    'e0536c490ce57e60ba6e10c54f1b9baf9c4d6cd6c18be8a4d2f7a4e685200df2',  # IOTK-intel-2020a.patch
]

# run './configure' before 'make all'; build serially (not parallel-safe)
with_configure = True
buildopts = 'all'
maxparallel = 1

# MakeCp: copy headers, executables (*.x), static libs and sources into place
files_to_copy = [
    (['include/*'], 'include'),
    (['src/*.x'], 'bin'),
    (['src/*.a'], 'lib'),
    (['src/*'], 'src'),
]

sanity_check_paths = {
    'files': [],
    'dirs': ['bin', 'include', 'lib'],
}

moduleclass = 'lib'

20
i/IRODS/IRODS-0.7.3.eb Normal file
View File

@ -0,0 +1,20 @@
easyblock = 'PackedBinary'

name = 'IRODS'
version = '0.7.3'

# BUGFIX: the original left the easyconfig template placeholders in place
# ('http://www.example.com' / 'TEMPLATE DESCRIPTION'); point them at the
# actual project this easyconfig installs (cyverse/irodsfs, per source_urls).
homepage = 'https://github.com/cyverse/irodsfs'
description = """irodsfs (iRODS FUSE Lite) is a FUSE client that mounts iRODS
collections as a local file system."""

toolchain = SYSTEM

# pre-built amd64 Linux release tarball
source_urls = ['https://github.com/cyverse/irodsfs/releases/download/v%(version)s/']
sources = ['irodsfs_amd64_linux_v%(version)s.tar']
checksums = ['0c973134e7646309a9292d3b2f3df8b62f27c686c9a04de49f71ea9f9b15a038']

sanity_check_paths = {
    'files': ['irodsfs'],
    'dirs': []
}

# BUGFIX: this installs a standalone binary tool, not a library
moduleclass = 'tools'

View File

@ -0,0 +1,48 @@
# IT4Innovations 2022
# JK
easyblock = 'CMakeMake'
name = 'libxc'
version = '5.1.5'
homepage = 'https://www.tddft.org/programs/libxc'
description = """Libxc is a library of exchange-correlation functionals for density-functional theory.
The aim is to provide a portable, well tested and reliable set of exchange and correlation functionals."""
toolchain = {'name': 'intel', 'version': '2020a'}
source_urls = ['https://www.tddft.org/programs/libxc/down.php?file=%(version)s/']
sources = [SOURCE_TAR_GZ]
checksums = ['02e4615a22dc3ec87a23efbd3d9be5bfad2445337140bad1720699571c45c3f9']
# CMake drives the build; Perl is used by libxc's source-generation scripts
builddependencies = [
    ('CMake', '3.16.4'),
    ('Perl', '5.30.2'),
]
separate_build_dir = True
# common CMake options: enable the Fortran 90 and Fortran 2003 interfaces,
# and disable host-specific tuning (-DENABLE_XHOST=OFF) for portable binaries
local_common_configopts = "-DENABLE_FORTRAN=ON -DENABLE_FORTRAN03=ON -DENABLE_XHOST=OFF"
# perform iterative build to get both static and shared libraries
configopts = [
    local_common_configopts + ' -DBUILD_SHARED_LIBS=OFF',
    local_common_configopts + ' -DBUILD_SHARED_LIBS=ON',
]
# serial build; NOTE(review): presumably the parallel build is unreliable here — confirm
parallel = 1
# make sure that built libraries (libxc*.so*) in build directory are picked when running tests
# this is required when RPATH linking is used
pretestopts = "export LD_LIBRARY_PATH=%(builddir)s/easybuild_obj:$LD_LIBRARY_PATH && "
runtest = 'test'
# expect the xc-info tool plus static (.a) and shared libraries for the C ('' suffix),
# Fortran 2003 (f03) and Fortran 90 (f90) interfaces
sanity_check_paths = {
    'files': ['bin/xc-info'] +
             ['lib/libxc%s.%s' % (x, y) for x in ['', 'f03', 'f90'] for y in ['a', SHLIB_EXT]],
    'dirs': ['include', 'lib/pkgconfig', 'share/cmake/Libxc'],
}
moduleclass = 'chem'

View File

@ -0,0 +1,31 @@
# IT4Innovations 2022
# JK
easyblock = 'Binary'

name = 'NodeODM'
version = '2.2.0'
local_nodejsver = '16.15.1'

# BUGFIX: homepage and description were copy-pasted from the npm 'request'
# package easyconfig ("the simplest way possible to make http calls");
# describe NodeODM itself, matching the source_urls below.
homepage = 'https://github.com/OpenDroneMap/NodeODM'
description = """NodeODM is a Node.js application and standard API specification for
processing aerial images with engines such as ODM (OpenDroneMap)."""

toolchain = {'name': 'GCCcore', 'version': '11.3.0'}

source_urls = ['https://github.com/OpenDroneMap/NodeODM/archive/refs/tags/']
sources = ['v%(version)s.tar.gz']
checksums = ['b901ea27d684774c3a978a474aa0473481ae7fbfaada30a0385e9029e96911fc']

dependencies = [
    ('nodejs', local_nodejsver),
]

# install the release tarball globally with npm into the module prefix
install_cmd = 'npm install --prefix %(installdir)s -g v%(version)s.tar.gz'

sanity_check_paths = {
    'files': [],
    'dirs': ['lib/node_modules/%(name)s'],
}

moduleclass = 'data'

View File

@ -0,0 +1,26 @@
# Paths excluded from the Docker build context
Dockerfile
tests/test_data
# SuperBuild intermediates (rebuilt inside the image)
SuperBuild/build
SuperBuild/download
SuperBuild/install
SuperBuild/src
build
# per-run pipeline output directories
opensfm
odm_orthophoto
odm_texturing
odm_meshing
odm_georeferencing
images_resize
# repository metadata and non-build content
.git
/contrib
/docs
/hooks
/img
/license
/tests
tests.sh
settings.yml
code_of_conduct.md
configure_18_04.sh
index.html

View File

@ -0,0 +1,97 @@
# Build/publish multi-arch ODM Docker images and a WSL rootfs on pushes to
# master and on v* tags; tag builds also create a GitHub Release.
# (Indentation reconstructed; the scraped copy had it stripped.)
name: Publish Docker and WSL Images
on:
  push:
    branches:
      - master
    tags:
      - v*
jobs:
  build:
    runs-on: self-hosted
    timeout-minutes: 2880
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 12
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          config-inline: |
            [worker.oci]
            max-parallelism = 1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      # Use the repository information of the checked-out code to format docker tags
      - name: Docker meta
        id: docker_meta
        uses: crazy-max/ghaction-docker-meta@v1
        with:
          images: opendronemap/odm
          tag-semver: |
            {{version}}
      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          file: ./portable.Dockerfile
          platforms: linux/amd64,linux/arm64
          push: true
          tags: |
            ${{ steps.docker_meta.outputs.tags }}
            opendronemap/odm:latest
      - name: Export WSL image
        id: wsl_export
        run: |
          docker pull opendronemap/odm
          # BUGFIX: export to a plain .tar first; gzip then yields the
          # odm-wsl-rootfs-amd64.tar.gz that later steps reference. The
          # original exported straight to *.tar.gz and gzip produced
          # *.tar.gz.gz, so the advertised file never existed.
          docker export $(docker create opendronemap/odm) --output odm-wsl-rootfs-amd64.tar
          gzip odm-wsl-rootfs-amd64.tar
          echo ::set-output name=amd64-rootfs::"odm-wsl-rootfs-amd64.tar.gz"
      # Convert tag into a GitHub Release if we're building a tag
      - name: Create Release
        # BUGFIX: tag pushes arrive with github.event_name == 'push', so the
        # original check (== 'tag') could never be true; test the ref instead.
        if: startsWith(github.ref, 'refs/tags/')
        id: create_release
        uses: actions/create-release@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          tag_name: ${{ github.ref }}
          release_name: Release ${{ github.ref }}
          draft: false
          prerelease: false
      # Upload the WSL image to the new Release if we're building a tag
      - name: Upload amd64 Release Asset
        if: startsWith(github.ref, 'refs/tags/')
        id: upload-amd64-wsl-rootfs
        uses: actions/upload-release-asset@v1
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        with:
          # upload_url comes from the Create Release step's outputs; see
          # https://jasonet.co/posts/new-features-of-github-actions/#passing-data-to-future-steps
          upload_url: ${{ steps.create_release.outputs.upload_url }}
          asset_path: ./${{ steps.wsl_export.outputs.amd64-rootfs }}
          asset_name: ${{ steps.wsl_export.outputs.amd64-rootfs }}
          asset_content_type: application/gzip
      # Always archive the WSL rootfs
      - name: Upload amd64 Artifact
        uses: actions/upload-artifact@v2
        with:
          name: wsl-rootfs
          path: ${{ steps.wsl_export.outputs.amd64-rootfs }}
      - name: Docker image digest and WSL rootfs download URL
        run: |
          echo "Docker image digest: ${{ steps.docker_build.outputs.digest }}"
          # BUGFIX: step outputs live under steps.<id>.outputs.*
          echo "WSL AMD64 rootfs URL: ${{ steps.upload-amd64-wsl-rootfs.outputs.browser_download_url }}"
      # Trigger NodeODM build
      - name: Dispatch NodeODM Build Event
        id: nodeodm_dispatch
        run: |
          curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker.yaml/dispatches --data '{"ref": "master"}'

View File

@ -0,0 +1,41 @@
# Build and publish the GPU-enabled ODM Docker image (opendronemap/odm:gpu)
# on pushes to master and v* tags, then trigger the NodeODM GPU build.
# (Indentation reconstructed; the scraped copy had it stripped.)
name: Publish Docker GPU Images
on:
  push:
    branches:
      - master
    tags:
      - v*
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 12
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push Docker image
        id: docker_build
        uses: docker/build-push-action@v2
        with:
          file: ./gpu.Dockerfile
          platforms: linux/amd64
          push: true
          tags: opendronemap/odm:gpu
      # Trigger NodeODM build
      - name: Dispatch NodeODM Build Event
        id: nodeodm_dispatch
        run: |
          curl -X POST -u "${{secrets.PAT_USERNAME}}:${{secrets.PAT_TOKEN}}" -H "Accept: application/vnd.github.everest-preview+json" -H "Content-Type: application/json" https://api.github.com/repos/OpenDroneMap/NodeODM/actions/workflows/publish-docker-gpu.yaml/dispatches --data '{"ref": "master"}'

View File

@ -0,0 +1,56 @@
# NOTE: this entire workflow is commented out (snap publishing is disabled).
# name: Publish Snap
# on:
# push:
# branches:
# - master
# tags:
# - v**
# jobs:
# build-and-release:
# runs-on: ubuntu-latest
# strategy:
# matrix:
# architecture:
# - amd64
# steps:
# - name: Checkout
# uses: actions/checkout@v2
# - name: Set Swap Space
# uses: pierotofy/set-swap-space@master
# with:
# swap-size-gb: 12
# - name: Build
# id: build
# uses: diddlesnaps/snapcraft-multiarch-action@v1
# with:
# architecture: ${{ matrix.architecture }}
# - name: Review
# uses: diddlesnaps/snapcraft-review-tools-action@v1
# with:
# snap: ${{ steps.build.outputs.snap }}
# isClassic: 'false'
# - name: Publish unstable builds to Edge
# if: github.ref == 'refs/heads/master'
# uses: snapcore/action-publish@v1
# with:
# store_login: ${{ secrets.STORE_LOGIN }}
# snap: ${{ steps.build.outputs.snap }}
# release: edge
# - name: Publish tagged prerelease builds to Beta
# # These are identified by having a hyphen in the tag name, e.g.: v1.0.0-beta1
# if: startsWith(github.ref, 'refs/tags/v') && contains(github.ref, '-')
# uses: snapcore/action-publish@v1
# with:
# store_login: ${{ secrets.STORE_LOGIN }}
# snap: ${{ steps.build.outputs.snap }}
# release: beta
# - name: Publish tagged stable or release-candidate builds to Candidate
# # These are identified by NOT having a hyphen in the tag name, OR having "-RC" or "-rc" in the tag name.
# if: startsWith(github.ref, 'refs/tags/v1') && ( ( ! contains(github.ref, '-') ) || contains(github.ref, '-RC') || contains(github.ref, '-rc') )
# uses: snapcore/action-publish@v1
# with:
# store_login: ${{ secrets.STORE_LOGIN }}
# snap: ${{ steps.build.outputs.snap }}
# release: candidate

View File

@ -0,0 +1,56 @@
# Build the ODM Windows installer on pushes to master and v* tags; the
# installer is archived as an artifact and, for tag builds, attached to
# the release. (Indentation reconstructed; the scraped copy had it stripped.)
name: Publish Windows Setup
on:
  push:
    branches:
      - master
    tags:
      - v*
jobs:
  build:
    runs-on: windows-2019
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8.1'
          architecture: 'x64'
      - uses: Jimver/cuda-toolkit@v0.2.4
        id: cuda-toolkit
        with:
          cuda: '11.4.0'
      - name: Extract code signing cert
        id: code_sign
        uses: timheuer/base64-to-file@v1
        with:
          fileName: 'comodo.pfx'
          encodedString: ${{ secrets.CODE_SIGNING_CERT }}
      - name: Install venv
        run: |
          python -m pip install virtualenv
      - name: Build sources
        run: |
          python configure.py build
      - name: Create setup
        env:
          CODE_SIGN_CERT_PATH: ${{ steps.code_sign.outputs.filePath }}
        run: |
          python configure.py dist --code-sign-cert-path $env:CODE_SIGN_CERT_PATH
      - name: Upload Setup File
        uses: actions/upload-artifact@v2
        with:
          name: Setup
          path: dist\*.exe
      - name: Upload Setup to Release
        uses: svenstaro/upload-release-action@v2
        if: startsWith(github.ref, 'refs/tags/')
        with:
          repo_token: ${{ secrets.GITHUB_TOKEN }}
          file: dist\*.exe
          file_glob: true
          tag: ${{ github.ref }}
          overwrite: true

View File

@ -0,0 +1,70 @@
# Pull-request CI: build the portable Docker image and the Windows sources
# without publishing anything. The snapcraft job is kept commented out
# (disabled). (Indentation reconstructed; the scraped copy had it stripped.)
name: Build PRs
on:
  pull_request:
jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set Swap Space
        uses: pierotofy/set-swap-space@master
        with:
          swap-size-gb: 12
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      - name: Build
        uses: docker/build-push-action@v2
        with:
          file: ./portable.Dockerfile
          platforms: linux/amd64
          push: false
  # snapcraft:
  #   runs-on: ubuntu-latest
  #   strategy:
  #     matrix:
  #       architecture:
  #         - amd64
  #   steps:
  #     - name: Checkout
  #       uses: actions/checkout@v2
  #     - name: Set Swap Space
  #       uses: pierotofy/set-swap-space@master
  #       with:
  #         swap-size-gb: 12
  #     - name: Build
  #       id: build
  #       uses: diddlesnaps/snapcraft-multiarch-action@v1
  #       with:
  #         architecture: ${{ matrix.architecture }}
  #     - name: Review
  #       uses: diddlesnaps/snapcraft-review-tools-action@v1
  #       with:
  #         snap: ${{ steps.build.outputs.snap }}
  #         isClassic: 'false'
  windows:
    runs-on: windows-2019
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Setup Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.8.1'
          architecture: 'x64'
      - name: Setup Visual C++
        uses: ilammy/msvc-dev-cmd@v1
        with:
          arch: x64
      - name: Install venv
        run: |
          python -m pip install virtualenv
      - name: Build sources
        run: |
          python configure.py build

29
o/ODM/ODM-2.8.7/.gitignore vendored Normal file
View File

@ -0,0 +1,29 @@
*~
# build artifacts placed in the source tree
bin/
include/
lib/
logs/
share/
src/
!modules/*
# SuperBuild downloads and build/install trees
download/
SuperBuild/build/
SuperBuild/install/
build/
# downloaded third-party source archives
cmvs.tar.gz
parallel.tar.bz2
LAStools.zip
pcl.tar.gz
ceres-solver.tar.gz
*.pyc
opencv.zip
settings.yaml
.setupdevenv
__pycache__
*.snap

0
o/ODM/ODM-2.8.7/.gitmodules vendored Normal file
View File

1
o/ODM/ODM-2.8.7/CNAME Normal file
View File

@ -0,0 +1 @@
opendronemap.org

View File

@ -0,0 +1 @@
See https://github.com/OpenDroneMap/documents/blob/master/CONTRIBUTING.md

View File

@ -0,0 +1,48 @@
FROM ubuntu:21.04 AS builder

# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

# Prepare directories
WORKDIR /code

# Copy everything
COPY . ./

# Run the build
RUN bash configure.sh install

# Clean Superbuild
RUN bash configure.sh clean

### END Builder

### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM ubuntu:21.04

# Env variables
# BUGFIX: the first PYTHONPATH entry was '.../lib/python3.9' without the
# trailing '/dist-packages', inconsistent with the builder stage above, so
# packages installed there were not on the final image's path.
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

WORKDIR /code

# Copy everything we built from the builder
COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local

# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
# Also run a smoke test on ODM and OpenSfM
RUN bash configure.sh installruntimedepsonly \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
  && bash run.sh --help \
  && bash -c "eval $(python3 /code/opendm/context.py) && python3 -c 'from opensfm import io, pymap'"

# Entry point
ENTRYPOINT ["python3", "/code/run.py"]

661
o/ODM/ODM-2.8.7/LICENSE Normal file
View File

@ -0,0 +1,661 @@
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.

293
o/ODM/ODM-2.8.7/README.md Normal file
View File

@ -0,0 +1,293 @@
![ODM Logo](https://user-images.githubusercontent.com/1951843/79699889-438ce580-8260-11ea-9c79-8667834aeab2.png)
An open source command line toolkit for processing aerial drone imagery. ODM turns simple 2D images into:
* Classified Point Clouds
* 3D Textured Models
* Georeferenced Orthorectified Imagery
* Georeferenced Digital Elevation Models
![images-diag](https://user-images.githubusercontent.com/1174901/96644651-5b205600-12f7-11eb-827b-8f4a3a6f3b21.png)
The application is available for Windows, Mac and Linux and it works from the command line, making it ideal for power users, scripts and for integration with other software.
If you would rather not type commands in a shell and are looking for a friendly user interface, check out [WebODM](https://github.com/OpenDroneMap/WebODM).
## Quickstart
The easiest way to run ODM is via docker. To install docker, see [docs.docker.com](https://docs.docker.com). Once you have docker installed and [working](https://docs.docker.com/get-started/#test-docker-installation), you can run ODM by placing some images (JPEGs or TIFFs) in a folder named “images” (for example `C:\Users\youruser\datasets\project\images` or `/home/youruser/datasets/project/images`) and simply run from a Command Prompt / Terminal:
```bash
# Windows
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
# Mac/Linux
docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
```
You can pass [additional parameters](https://docs.opendronemap.org/arguments/) by appending them to the command:
```bash
docker run -ti --rm -v /datasets:/datasets opendronemap/odm --project-path /datasets project [--additional --parameters --here]
```
For example, to generate a DSM (`--dsm`) and increase the orthophoto resolution (`--orthophoto-resolution 2`) :
```bash
docker run -ti --rm -v /datasets:/datasets opendronemap/odm --project-path /datasets project --dsm --orthophoto-resolution 2
```
## Viewing Results
When the process finishes, the results will be organized as follows:
|-- images/
|-- img-1234.jpg
|-- ...
|-- opensfm/
|-- see mapillary/opensfm repository for more info
|-- odm_meshing/
|-- odm_mesh.ply # A 3D mesh
|-- odm_texturing/
|-- odm_textured_model.obj # Textured mesh
|-- odm_textured_model_geo.obj # Georeferenced textured mesh
|-- odm_georeferencing/
|-- odm_georeferenced_model.laz # LAZ format point cloud
|-- odm_orthophoto/
|-- odm_orthophoto.tif # Orthophoto GeoTiff
You can use the following free and open source software to open the files generated in ODM:
* .tif (GeoTIFF): [QGIS](http://www.qgis.org/)
* .laz (Compressed LAS): [CloudCompare](https://www.cloudcompare.org/)
* .obj (Wavefront OBJ), .ply (Stanford Triangle Format): [MeshLab](http://www.meshlab.net/)
**Note!** Opening the .tif files generated by ODM in programs such as Photoshop or GIMP might not work (they are GeoTIFFs, not plain TIFFs). Use [QGIS](http://www.qgis.org/) instead.
## API
ODM can be made accessible from a network via [NodeODM](https://github.com/OpenDroneMap/NodeODM).
## Documentation
See http://docs.opendronemap.org for tutorials and more guides.
## Forum
We have a vibrant [community forum](https://community.opendronemap.org/). You can [search it](https://community.opendronemap.org/search?expanded=true) for issues you might be having with ODM and you can post questions there. We encourage users of ODM to participate in the forum and to engage with fellow drone mapping users.
## Windows Setup
ODM can be installed natively on Windows. Just download the latest setup from the [releases](https://github.com/OpenDroneMap/ODM/releases) page. After opening the ODM Console you can process datasets by typing:
```bash
run C:\Users\youruser\datasets\project [--additional --parameters --here]
```
## Snap Package
ODM is now available as a Snap Package from the Snap Store. To install you may use the Snap Store (available itself as a Snap Package) or the command line:
```bash
sudo snap install opendronemap
```
To run, you will need a terminal window into which you can type:
```bash
opendronemap
# or
snap run opendronemap
# or
/snap/bin/opendronemap
```
Snap packages will be kept up-to-date automatically, so you don't need to update ODM manually.
## GPU Acceleration
ODM has support for doing SIFT feature extraction on a GPU, which is about 2x faster than the CPU on a typical consumer laptop. To use this feature, you need to use the `opendronemap/odm:gpu` docker image instead of `opendronemap/odm` and you need to pass the `--gpus all` flag:
```
docker run -ti --rm -v c:/Users/youruser/datasets:/datasets --gpus all opendronemap/odm:gpu --project-path /datasets project
```
When you run ODM, if the GPU is recognized, in the first few lines of output you should see:
```
[INFO] Writing exif overrides
[INFO] Maximum photo dimensions: 4000px
[INFO] Found GPU device: Intel(R) OpenCL HD Graphics
[INFO] Using GPU for extracting SIFT features
```
The SIFT GPU implementation is CUDA-based, so should work with most NVIDIA graphics cards of the GTX 9xx Generation or newer.
If you have an NVIDIA card, you can test that docker is recognizing the GPU by running:
```
docker run --rm --gpus all nvidia/cuda:10.0-base nvidia-smi
```
If you see an output that looks like this:
```
Fri Jul 24 18:51:55 2020
+-----------------------------------------------------------------------------+
| NVIDIA-SMI 440.82 Driver Version: 440.82 CUDA Version: 10.2 |
|-------------------------------+----------------------+----------------------+
| GPU Name Persistence-M| Bus-Id Disp.A | Volatile Uncorr. ECC |
| Fan Temp Perf Pwr:Usage/Cap| Memory-Usage | GPU-Util Compute M. |
```
You're in good shape!
See https://github.com/NVIDIA/nvidia-docker and https://docs.nvidia.com/datacenter/cloud-native/container-toolkit/install-guide.html#docker for information on docker/NVIDIA setup.
## WSL or WSL2 Install
Note: This requires that you have installed WSL already by following [the instructions on Microsoft's Website](https://docs.microsoft.com/en-us/windows/wsl/install-win10).
You can run ODM via WSL or WSL2 by downloading the `rootfs.tar.gz` file from [the releases page on GitHub](https://github.com/OpenDroneMap/ODM/releases). Once you have the file saved to your `Downloads` folder in Windows, open a PowerShell or CMD window by right-clicking the Flag Menu (bottom left by default) and selecting "Windows PowerShell", or alternatively by using the [Windows Terminal from the Windows Store](https://www.microsoft.com/store/productId/9N0DX20HK701).
Inside a PowerShell window, or Windows Terminal running PowerShell, type the following:
```powershell
# PowerShell
wsl.exe --import ODM $env:APPDATA\ODM C:\path\to\your\Downloads\rootfs.tar.gz
```
Alternatively if you're using `CMD.exe` or the `CMD` support in Windows Terminal type:
```cmd
# CMD
wsl.exe --import ODM %APPDATA%\ODM C:\path\to\your\Downloads\rootfs.tar.gz
```
In either case, make sure you replace `C:\path\to\your\Downloads\rootfs.tar.gz` with the actual path to your `rootfs.tar.gz` file.
This will save a new Hard Disk image to your Windows `AppData` folder at `C:\Users\username\AppData\roaming\ODM` (where `username` is your Username in Windows), and will set-up a new WSL "distro" called `ODM`.
You may start the ODM distro by using the relevant option in the Windows Terminal (from the Windows Store) or by executing `wsl.exe -d ODM` in a PowerShell or CMD window.
ODM is installed to the distro's `/code` directory. You may execute it with:
```bash
/code/run.sh
```
### Updating ODM in WSL
The easiest way to update the installation of ODM is to download the new `rootfs.tar.gz` file and import it as another distro. You may then unregister the original instance the same way you delete ODM from WSL (see next heading).
### Deleting an ODM WSL instance
```cmd
wsl.exe --unregister ODM
```
Finally you'll want to delete the files by using your Windows File Manager (Explorer) to navigate to `%APPDATA%`, find the `ODM` directory, and delete it by dragging it to the recycle bin. To permanently delete it empty the recycle bin.
If you have installed to a different directory by changing the `--import` command you ran to install you must use that directory name to delete the correct files. This is likely the case if you have multiple ODM installations or are updating an existing installation.
## Native Install (Ubuntu 21.04)
You can run ODM natively on Ubuntu 21.04 (although we don't recommend it):
1. Download the source from [here](https://github.com/OpenDroneMap/ODM/archive/master.zip)
2. Run `bash configure.sh install`
3. Download a sample dataset from [here](https://github.com/OpenDroneMap/odm_data_aukerman/archive/master.zip) (about 550MB) and extract it in `/datasets/aukerman`
4. Run `./run.sh --project-path /datasets odm_data_aukerman`
### Updating a native installation
When updating to a newer version of ODM, it is recommended that you run
bash configure.sh reinstall
to ensure all the dependent packages and modules get updated.
### Build From Source
If you want to rebuild your own docker image (if you have changed the source code, for example), from the ODM folder you can type:
```bash
docker build -t my_odm_image --no-cache .
```
When building your own Docker image, if image size is of importance to you, you should use the ```--squash``` flag, like so:
```bash
docker build --squash -t my_odm_image .
```
This will clean up intermediate steps in the Docker build process, resulting in a significantly smaller image (about half the size).
Experimental flags need to be enabled in Docker to use the ```--squash``` flag. To enable this, insert the following into the file `/etc/docker/daemon.json`:
```json
{
"experimental": true
}
```
After this, you must restart docker.
## Developers
Help improve our software! We welcome contributions from everyone, whether to add new features, improve speed, fix existing bugs or add support for more cameras. Check our [code of conduct](https://github.com/OpenDroneMap/documents/blob/master/CONDUCT.md), the [contributing guidelines](https://github.com/OpenDroneMap/documents/blob/master/CONTRIBUTING.md) and [how decisions are made](https://github.com/OpenDroneMap/documents/blob/master/GOVERNANCE.md#how-decisions-are-made).
For Linux users, the easiest way to modify the software is to make sure docker is installed, clone the repository and then run from a shell:
```bash
$ DATA=/path/to/datasets ./start-dev-env.sh
```
Where `/path/to/datasets` is a directory where you can place test datasets (it can also point to an empty directory if you don't have test datasets).
Run configure to set up the required third party libraries:
```bash
(odmdev) [user:/code] master+* ± bash configure.sh reinstall
```
You can now make changes to the ODM source. When you are ready to test the changes you can simply invoke:
```bash
(odmdev) [user:/code] master+* ± ./run.sh --project-path /datasets mydataset
```
If you have questions, join the developer's chat at https://community.opendronemap.org/c/developers-chat/21
1. Try to keep commits clean and simple
2. Submit a pull request with detailed changes and test results
3. Have fun!
### Troubleshooting
The dev environment makes use of `opendronemap/nodeodm` by default. You may want to run
`docker pull opendronemap/nodeodm` before running `./start-dev-env.sh` to avoid using an old cached version.
In order to make a clean build, remove `~/.odm-dev-home` and `ODM/.setupdevenv`.
## Credits
ODM makes use of [several libraries](https://github.com/OpenDroneMap/ODM/blob/master/snap/snapcraft.yaml#L36) and other awesome open source projects to perform its tasks. Among them we'd like to highlight:
- [OpenSfM](https://github.com/mapillary/OpenSfM)
- [OpenMVS](https://github.com/cdcseacave/openMVS/)
- [PDAL](https://github.com/PDAL/PDAL)
- [Entwine](https://entwine.io/)
- [MVS Texturing](https://github.com/nmoehrle/mvs-texturing)
- [GRASS GIS](https://grass.osgeo.org/)
- [GDAL](https://gdal.org/)
- [PoissonRecon](https://github.com/mkazhdan/PoissonRecon)
## Citation
> *OpenDroneMap Authors* ODM - A command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images. **OpenDroneMap/ODM GitHub Page** 2020; [https://github.com/OpenDroneMap/ODM](https://github.com/OpenDroneMap/ODM)
## Trademark
See [Trademark Guidelines](https://github.com/OpenDroneMap/documents/blob/master/TRADEMARK.md)

View File

@ -0,0 +1,219 @@
# SuperBuild top-level CMake: orchestrates download, build and install of all
# third-party dependencies for ODM via ExternalProject recipes in cmake/.
cmake_minimum_required(VERSION 3.1)
project(ODM-SuperBuild)
# Default to an optimized build when the user did not choose one.
if (NOT CMAKE_BUILD_TYPE)
message(STATUS "No build type selected, default to Release")
set(CMAKE_BUILD_TYPE "Release")
endif()
# Setup SuperBuild root location
set(SB_ROOT_DIR ${CMAKE_CURRENT_SOURCE_DIR})
################################
# Setup SuperBuild directories #
################################
# Setup location where source tar-balls are downloaded
set(SB_DOWNLOAD_DIR "${SB_ROOT_DIR}/download"
CACHE PATH "Location where source tar-balls are (to be) downloaded.")
mark_as_advanced(SB_DOWNLOAD_DIR)
message(STATUS "SuperBuild files will be downloaded to: ${SB_DOWNLOAD_DIR}")
# Setup location where source tar-balls are extracted
set(SB_SOURCE_DIR "${SB_ROOT_DIR}/src"
CACHE PATH "Location where source tar-balls are (will be).")
mark_as_advanced(SB_SOURCE_DIR)
set(SB_BUILD_DIR "${SB_ROOT_DIR}/build")
message(STATUS "SuperBuild source files will be extracted to: ${SB_SOURCE_DIR}")
# Setup location where built artifacts are installed
set(SB_INSTALL_DIR "${SB_ROOT_DIR}/install"
CACHE PATH "Location where source tar-balls are (will be) installed.")
# Fix: this previously re-marked SB_SOURCE_DIR (copy-paste error); the cache
# variable declared just above is SB_INSTALL_DIR.
mark_as_advanced(SB_INSTALL_DIR)
message(STATUS "SuperBuild source files will be installed to: ${SB_INSTALL_DIR}")
# Setup location where binary (build-tree) files are located
set(SB_BINARY_DIR "${SB_ROOT_DIR}/build"
CACHE PATH "Location where files are (will be) located.")
mark_as_advanced(SB_BINARY_DIR)
message(STATUS "SuperBuild binary files will be located to: ${SB_BINARY_DIR}")
# Windows-specific configuration: requires a vcpkg toolchain file and a
# pre-created Python virtualenv one directory above the SuperBuild root.
if (WIN32)
if (NOT DEFINED CMAKE_TOOLCHAIN_FILE)
message(FATAL_ERROR "CMAKE_TOOLCHAIN_FILE not set. You need to set it to the path of vcpkg.cmake")
endif()
# Derive the vcpkg root from the toolchain file path (vcpkg.cmake lives two
# directories below the vcpkg root).
get_filename_component(CMAKE_TOOLCHAIN_DIR ${CMAKE_TOOLCHAIN_FILE} DIRECTORY)
get_filename_component(VCPKG_ROOT "${CMAKE_TOOLCHAIN_DIR}/../../" ABSOLUTE)
set(WIN32_CMAKE_ARGS "-DCMAKE_TOOLCHAIN_FILE=${CMAKE_TOOLCHAIN_FILE}")
set(PYTHON_HOME "${SB_ROOT_DIR}/../venv")
set(PYTHON_EXE_PATH "${PYTHON_HOME}/Scripts/python")
# Use the GDAL version that comes with pip
set(GDAL_ROOT "${PYTHON_HOME}/Lib/site-packages/osgeo")
set(GDAL_LIBRARY "${GDAL_ROOT}/lib/gdal_i.lib")
set(GDAL_INCLUDE_DIR "${GDAL_ROOT}/include/gdal")
# Also download missing headers :/
# The pip GDAL wheel ships without ogrsf_frmts.h; fetch it from the GDAL 3.2
# release branch so dependent projects can compile against the OGR API.
if (NOT EXISTS "${GDAL_INCLUDE_DIR}/ogrsf_frmts.h")
file(DOWNLOAD "https://raw.githubusercontent.com/OSGeo/gdal/release/3.2/gdal/ogr/ogrsf_frmts/ogrsf_frmts.h" "${GDAL_INCLUDE_DIR}/ogrsf_frmts.h")
endif()
message("Copying VCPKG DLLs...")
file(GLOB COPY_DLLS "${VCPKG_ROOT}/installed/x64-windows/bin/*.dll")
file(COPY ${COPY_DLLS} DESTINATION "${SB_INSTALL_DIR}/bin")
message("Copying CUDA DLLs...")
# Only the CUDA runtime DLLs (cudart64*) are copied next to the binaries.
file(GLOB CUDA_DLLS "$ENV{CUDA_PATH}/bin/cudart64*.dll")
file(COPY ${CUDA_DLLS} DESTINATION "${SB_INSTALL_DIR}/bin")
set(WIN32_GDAL_ARGS -DGDAL_FOUND=TRUE -DGDAL_LIBRARY=${GDAL_LIBRARY} -DGDAL_INCLUDE_DIR=${GDAL_INCLUDE_DIR})
else()
# On non-Windows platforms rely on the system Python 3 interpreter.
set(PYTHON_EXE_PATH "/usr/bin/python3")
endif()
# Path to additional CMake modules
set(CMAKE_MODULE_PATH ${SB_ROOT_DIR}/cmake)
include(ExternalProject)
# Project-local helpers SETUP_EXTERNAL_PROJECT / SETUP_EXTERNAL_PROJECT_CUSTOM
# wrap ExternalProject_Add using the External-<name>.cmake recipes in cmake/.
include(ExternalProject-Setup)
#########################################
# Download and install third party libs #
#########################################
# ---------------------------------------------------------------------------------------------
# Open Source Computer Vision (OpenCV)
#
# NOTE(review): this version variable is not used by the OpenCV recipe's
# download URL (which pins 4.5.0); presumably kept for bookkeeping — verify.
set(ODM_OpenCV_Version 2.4.11)
option(ODM_BUILD_OpenCV "Force to build OpenCV library" OFF)
SETUP_EXTERNAL_PROJECT(OpenCV ${ODM_OpenCV_Version} ${ODM_BUILD_OpenCV})
# ---------------------------------------------------------------------------------------------
# Point Cloud Library (PCL)
#
set(ODM_PCL_Version 1.8.0)
option(ODM_BUILD_PCL "Force to build PCL library" OFF)
SETUP_EXTERNAL_PROJECT(PCL ${ODM_PCL_Version} ${ODM_BUILD_PCL})
# ---------------------------------------------------------------------------------------------
# Google Flags library (GFlags)
#
set(ODM_GFlags_Version 2.1.2)
option(ODM_BUILD_GFlags "Force to build GFlags library" OFF)
SETUP_EXTERNAL_PROJECT(GFlags ${ODM_GFlags_Version} ${ODM_BUILD_GFlags})
# ---------------------------------------------------------------------------------------------
# Ceres Solver
#
set(ODM_Ceres_Version 2.0.0)
option(ODM_BUILD_Ceres "Force to build Ceres library" OFF)
SETUP_EXTERNAL_PROJECT(Ceres ${ODM_Ceres_Version} ${ODM_BUILD_Ceres})
# ---------------------------------------------------------------------------------------------
# Hexer
#
SETUP_EXTERNAL_PROJECT(Hexer 1.4 ON)
# ---------------------------------------------------------------------------------------------
# Open Structure from Motion (OpenSfM)
#
# Projects in this list are always built via their External-<name>.cmake recipe.
set(custom_libs OpenSfM
LASzip
PDAL
Untwine
Entwine
MvsTexturing
OpenMVS
FPCFilter
PyPopsift
Obj2Tiles
)
# MVE is built in-source from the ODM fork; no install step (mvs-texturing
# consumes it directly from the source tree).
externalproject_add(mve
GIT_REPOSITORY https://github.com/OpenDroneMap/mve.git
GIT_TAG 262
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/mve
CMAKE_ARGS ${WIN32_CMAKE_ARGS}
BUILD_IN_SOURCE 1
INSTALL_COMMAND ""
)
foreach(lib ${custom_libs})
SETUP_EXTERNAL_PROJECT_CUSTOM(${lib})
endforeach()
# Parallelize native builds across all available cores.
include(ProcessorCount)
ProcessorCount(nproc)
# PoissonRecon ships MSBuild project files on Windows and a Makefile elsewhere;
# pick the matching build command and output path.
if (WIN32)
set (POISSON_BUILD_CMD ${CMAKE_MAKE_PROGRAM} ${SB_SOURCE_DIR}/PoissonRecon/PoissonRecon.vcxproj /p:configuration=${CMAKE_BUILD_TYPE} /p:Platform=x64 /p:PlatformToolset=${CMAKE_VS_PLATFORM_TOOLSET} /p:WindowsTargetPlatformVersion=${CMAKE_VS_WINDOWS_TARGET_PLATFORM_VERSION})
set (POISSON_BIN_PATH "x64/${CMAKE_BUILD_TYPE}/PoissonRecon.exe")
else()
set (POISSON_BUILD_CMD make -j${nproc} poissonrecon)
set (POISSON_BIN_PATH "Linux/PoissonRecon")
endif()
# PoissonRecon has no CMake install target; copy the built binary manually.
externalproject_add(poissonrecon
GIT_REPOSITORY https://github.com/OpenDroneMap/PoissonRecon.git
GIT_TAG 272
PREFIX ${SB_BINARY_DIR}/PoissonRecon
SOURCE_DIR ${SB_SOURCE_DIR}/PoissonRecon
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_COMMAND ${POISSON_BUILD_CMD}
INSTALL_COMMAND ${CMAKE_COMMAND} -E copy ${SB_SOURCE_DIR}/PoissonRecon/Bin/${POISSON_BIN_PATH} ${SB_INSTALL_DIR}/bin
)
# ODM helper tool: converts DEMs to meshes.
externalproject_add(dem2mesh
GIT_REPOSITORY https://github.com/OpenDroneMap/dem2mesh.git
GIT_TAG master
PREFIX ${SB_BINARY_DIR}/dem2mesh
SOURCE_DIR ${SB_SOURCE_DIR}/dem2mesh
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_GDAL_ARGS}
)
# ODM helper tool: converts DEMs to point clouds.
externalproject_add(dem2points
GIT_REPOSITORY https://github.com/OpenDroneMap/dem2points.git
GIT_TAG master
PREFIX ${SB_BINARY_DIR}/dem2points
SOURCE_DIR ${SB_SOURCE_DIR}/dem2points
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_GDAL_ARGS}
)
# Orthophoto generation tool; needs PCL and OpenCV built first.
externalproject_add(odm_orthophoto
DEPENDS pcl opencv
GIT_REPOSITORY https://github.com/OpenDroneMap/odm_orthophoto.git
GIT_TAG main
PREFIX ${SB_BINARY_DIR}/odm_orthophoto
SOURCE_DIR ${SB_SOURCE_DIR}/odm_orthophoto
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS} ${WIN32_GDAL_ARGS}
)
# LAS/LAZ point cloud utilities (ODM fork of LAStools).
externalproject_add(lastools
GIT_REPOSITORY https://github.com/OpenDroneMap/LAStools.git
GIT_TAG 250
PREFIX ${SB_BINARY_DIR}/lastools
SOURCE_DIR ${SB_SOURCE_DIR}/lastools
CMAKE_ARGS -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
)

View File

@ -0,0 +1,33 @@
# External-Ceres.cmake — SuperBuild recipe for the Ceres Solver 2.0.0 library,
# built from the official release tarball with the bundled miniglog (avoids a
# glog dependency) and position-independent code for linking into shared libs.
set(_proj_name ceres)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
DEPENDS gflags
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
# NOTE(review): plain-http download with no URL_HASH; consider https and a
# checksum to guard against tampering/corruption.
URL http://ceres-solver.org/ceres-solver-2.0.0.tar.gz
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_C_FLAGS=-fPIC
-DCMAKE_CXX_FLAGS=-fPIC
-DBUILD_EXAMPLES=OFF
-DBUILD_TESTING=OFF
# Very negative max log level silences miniglog output entirely.
-DMINIGLOG=ON
-DMINIGLOG_MAX_LOG_LEVEL=-100
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,36 @@
# External-Entwine.cmake — SuperBuild recipe for Entwine (point cloud indexing),
# built from the ODM fork; depends on PDAL.
set(_proj_name entwine)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
if (NOT WIN32)
# Treat PDAL headers as system includes to suppress their warnings.
set(EXTRA_CMAKE_ARGS -DCMAKE_CXX_FLAGS=-isystem\ ${SB_SOURCE_DIR}/pdal)
endif()
ExternalProject_Add(${_proj_name}
DEPENDS pdal
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/entwine/
GIT_TAG 285
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
${EXTRA_CMAKE_ARGS}
# Point the linker at the SuperBuild install tree for PDAL/LASzip libs.
-DADDITIONAL_LINK_DIRECTORIES_PATHS=${SB_INSTALL_DIR}/lib
-DWITH_TESTS=OFF
-DWITH_ZSTD=OFF
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,27 @@
# External-FPCFilter.cmake — SuperBuild recipe for FPCFilter (fast point cloud
# filtering), built from the ODM repository's main branch.
set(_proj_name fpcfilter)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
# NOTE(review): tracking "main" is not reproducible; a pinned tag/commit
# would make builds deterministic — confirm whether this is intentional.
GIT_REPOSITORY https://github.com/OpenDroneMap/FPCFilter
GIT_TAG main
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,28 @@
# External-GFlags.cmake — SuperBuild recipe for Google gflags, pinned to a
# specific upstream commit and built with PIC for linking into shared libs.
set(_proj_name gflags)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/gflags/gflags
GIT_TAG 28f50e0fed19872e0fd50dd23ce2ee8cd759338e
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_POSITION_INDEPENDENT_CODE=ON
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,27 @@
# External-Hexer.cmake — SuperBuild recipe for hexer (hexagonal boundary
# computation, a PDAL dependency), built from a pinned GitHub snapshot.
# Fix: removed a dangling DEPENDS keyword that had no arguments (a no-op that
# only confused readers).
set(_proj_name hexer)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/hobu/hexer/archive/bc748fc16b51c562f68f6641574b7af4244adfa2.tar.gz
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_GDAL_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,29 @@
# External-LASzip.cmake — SuperBuild recipe for LASzip (LAS/LAZ compression),
# pinned to a specific upstream commit and built as a shared library.
# Fix: removed a dangling DEPENDS keyword that had no arguments (a no-op that
# only confused readers).
set(_proj_name laszip)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name}
URL https://github.com/LASzip/LASzip/archive/0069c42307183c49744f1eb170f7032a8cf6a9db.zip
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DBUILD_SHARED_LIBS=ON
-DBUILD_STATIC_LIBS=OFF
-DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
# Force a predictable lib/ dir so dependents (PDAL) can hard-code the path.
-DCMAKE_INSTALL_LIBDIR=lib
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,30 @@
# External-MvsTexturing.cmake — SuperBuild recipe for mvs-texturing (texture
# mapping onto 3D meshes), built from the ODM fork; consumes the in-source MVE
# build declared in the top-level SuperBuild CMakeLists.
set(_proj_name mvstexturing)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
ExternalProject_Add(${_proj_name}
DEPENDS mve
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}/${_proj_name}
GIT_REPOSITORY https://github.com/OpenDroneMap/mvs-texturing
GIT_TAG 287
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
# Skip research/experimental components.
-DRESEARCH=OFF
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,33 @@
# External-Obj2Tiles.cmake — SuperBuild recipe for Obj2Tiles (OBJ to 3D Tiles
# converter). A prebuilt release binary is downloaded (no compilation) and
# copied into the install tree.
set(_proj_name obj2tiles)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
set(OBJ2TILES_VERSION v1.0.7)
set(OBJ2TILES_EXT "")
# Select the release asset matching the host platform/architecture.
set(OBJ2TILES_ARCH "Linux64")
if (WIN32)
set(OBJ2TILES_ARCH "Win64")
set(OBJ2TILES_EXT ".exe")
elseif(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64")
set(OBJ2TILES_ARCH "LinuxArm")
endif()
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/OpenDroneMap/Obj2Tiles/releases/download/${OBJ2TILES_VERSION}/Obj2Tiles-${OBJ2TILES_ARCH}.zip
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
# Prebuilt binary: no update/configure/build steps, only a copy on install.
UPDATE_COMMAND ""
CONFIGURE_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_COMMAND ""
INSTALL_COMMAND ${CMAKE_COMMAND} -E copy ${SB_SOURCE_DIR}/${_proj_name}/Obj2Tiles${OBJ2TILES_EXT} ${SB_INSTALL_DIR}/bin
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,72 @@
# External-OpenCV.cmake — SuperBuild recipe for OpenCV 4.5.0, built with a
# minimal set of modules (core CV + Python 3 bindings) and most optional
# integrations (CUDA, FFmpeg, GTK, VTK, ...) disabled.
set(_proj_name opencv)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
if (WIN32)
# Point the Python 3 bindings at the SuperBuild virtualenv; MSMF capture is
# disabled, and lib/bin install paths are forced into the install tree.
set(WIN32_CMAKE_EXTRA_ARGS -DPYTHON3_NUMPY_INCLUDE_DIRS=${PYTHON_HOME}/lib/site-packages/numpy/core/include
-DPYTHON3_PACKAGES_PATH=${PYTHON_HOME}/lib/site-packages
-DPYTHON3_EXECUTABLE=${PYTHON_EXE_PATH}
-DWITH_MSMF=OFF
-DOPENCV_LIB_INSTALL_PATH=${SB_INSTALL_DIR}/lib
-DOPENCV_BIN_INSTALL_PATH=${SB_INSTALL_DIR}/bin)
endif()
ExternalProject_Add(${_proj_name}
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
URL https://github.com/opencv/opencv/archive/4.5.0.zip
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
# Modules needed by the ODM pipeline:
-DBUILD_opencv_core=ON
-DBUILD_opencv_imgproc=ON
-DBUILD_opencv_highgui=ON
-DBUILD_opencv_video=ON
-DBUILD_opencv_ml=ON
-DBUILD_opencv_features2d=ON
-DBUILD_opencv_calib3d=ON
-DBUILD_opencv_contrib=ON
-DBUILD_opencv_flann=ON
-DBUILD_opencv_objdetect=ON
-DBUILD_opencv_photo=ON
-DBUILD_opencv_legacy=ON
-DBUILD_opencv_python3=ON
# Optional integrations disabled to keep the build small and portable:
-DWITH_FFMPEG=OFF
-DWITH_CUDA=OFF
-DWITH_GTK=OFF
-DWITH_VTK=OFF
-DWITH_EIGEN=OFF
-DWITH_OPENNI=OFF
-DBUILD_EXAMPLES=OFF
-DBUILD_TESTS=OFF
-DBUILD_PERF_TESTS=OFF
-DBUILD_DOCS=OFF
-DBUILD_opencv_apps=OFF
-DBUILD_opencv_gpu=OFF
-DBUILD_opencv_videostab=OFF
-DBUILD_opencv_nonfree=OFF
-DBUILD_opencv_stitching=OFF
-DBUILD_opencv_world=OFF
-DBUILD_opencv_superres=OFF
-DBUILD_opencv_java=OFF
-DBUILD_opencv_ocl=OFF
-DBUILD_opencv_ts=OFF
# Extended feature detectors (SIFT et al.) are required.
-DBUILD_opencv_xfeatures2d=ON
-DOPENCV_ALLOCATOR_STATS_COUNTER_TYPE=int64_t
-DCMAKE_BUILD_TYPE:STRING=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
${WIN32_CMAKE_ARGS}
${WIN32_CMAKE_EXTRA_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,78 @@
# External-OpenMVS.cmake — SuperBuild recipe for OpenMVS (dense reconstruction).
# Also declares its header-only dependencies VCG and Eigen 3.4, which are
# fetched but never configured/built/installed.
set(_proj_name openmvs)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
externalproject_add(vcg
GIT_REPOSITORY https://github.com/OpenDroneMap/VCG.git
GIT_TAG 285
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/vcg
CONFIGURE_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_COMMAND ""
INSTALL_COMMAND ""
)
externalproject_add(eigen34
GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git
GIT_TAG 3.4
UPDATE_COMMAND ""
SOURCE_DIR ${SB_SOURCE_DIR}/eigen34
CONFIGURE_COMMAND ""
BUILD_IN_SOURCE 1
BUILD_COMMAND ""
INSTALL_COMMAND ""
)
# SSE intrinsics are unavailable on ARM64.
SET(ARM64_CMAKE_ARGS "")
if(${CMAKE_SYSTEM_PROCESSOR} STREQUAL "aarch64" )
SET(ARM64_CMAKE_ARGS -DOpenMVS_USE_SSE=OFF)
endif()
# Allow linking against CUDA driver stubs so the build succeeds on machines
# without an NVIDIA driver installed.
SET(GPU_CMAKE_ARGS "")
if(UNIX)
if (EXISTS "/usr/local/cuda/lib64/stubs")
SET(GPU_CMAKE_ARGS -DCMAKE_LIBRARY_PATH=/usr/local/cuda/lib64/stubs)
endif()
endif()
if(WIN32)
# On Windows systems without NVIDIA GPUs, OpenMVS will not launch
# unless a CUDA DLL is available; we download a dummy DLL
# generated with https://github.com/ykhwong/dummy-dll-generator that is
# loaded UNLESS the real CUDA DLL is available, since it will
# be loaded before our dummy DLL.
file(DOWNLOAD "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/nvcuda_dummy.dll" "${SB_INSTALL_DIR}/bin/nvcuda.dll")
endif()
ExternalProject_Add(${_proj_name}
DEPENDS ceres opencv vcg eigen34
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/openMVS
GIT_TAG 287
#--Update/Patch step----------
UPDATE_COMMAND ""
#--Configure step-------------
SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
CMAKE_ARGS
-DOpenCV_DIR=${SB_INSTALL_DIR}/lib/cmake/opencv4
-DVCG_ROOT=${SB_SOURCE_DIR}/vcg
-DEIGEN3_INCLUDE_DIR=${SB_SOURCE_DIR}/eigen34/
-DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
-DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
# Build CUDA kernels for a wide range of GPU architectures.
-DOpenMVS_MAX_CUDA_COMPATIBILITY=ON
${GPU_CMAKE_ARGS}
${WIN32_CMAKE_ARGS}
${ARM64_CMAKE_ARGS}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
INSTALL_DIR ${SB_INSTALL_DIR}
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,43 @@
# External-OpenSfM.cmake — SuperBuild recipe for OpenSfM (structure from
# motion), built from the ODM fork against the SuperBuild's Ceres and OpenCV.
set(_proj_name opensfm)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")
include(ProcessorCount)
ProcessorCount(nproc)
if(WIN32)
set(OpenCV_DIR "${SB_INSTALL_DIR}/x64/vc16/lib")
set(BUILD_CMD ${CMAKE_COMMAND} --build "${SB_BUILD_DIR}/opensfm" --config "${CMAKE_BUILD_TYPE}")
else()
set(OpenCV_DIR "${SB_INSTALL_DIR}/lib/cmake/opencv4")
set(BUILD_CMD make "-j${nproc}")
endif()
ExternalProject_Add(${_proj_name}
DEPENDS ceres opencv gflags
PREFIX ${_SB_BINARY_DIR}
TMP_DIR ${_SB_BINARY_DIR}/tmp
STAMP_DIR ${_SB_BINARY_DIR}/stamp
#--Download step--------------
DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
GIT_REPOSITORY https://github.com/OpenDroneMap/OpenSfM/
GIT_TAG 287
#--Update/Patch step----------
# OpenSfM vendors dependencies as git submodules.
UPDATE_COMMAND git submodule update --init --recursive
#--Configure step-------------
# The checkout is placed directly under install/bin — presumably so ODM can
# run OpenSfM's Python code in place without an install step; configure
# points at the native extension sources inside it. TODO confirm.
SOURCE_DIR ${SB_INSTALL_DIR}/bin/${_proj_name}
CONFIGURE_COMMAND ${CMAKE_COMMAND} <SOURCE_DIR>/${_proj_name}/src
-DCERES_ROOT_DIR=${SB_INSTALL_DIR}
-DOpenCV_DIR=${OpenCV_DIR}
-DADDITIONAL_INCLUDE_DIRS=${SB_INSTALL_DIR}/include
-DOPENSFM_BUILD_TESTS=off
-DPYTHON_EXECUTABLE=${PYTHON_EXE_PATH}
${WIN32_CMAKE_ARGS}
BUILD_COMMAND ${BUILD_CMD}
#--Build step-----------------
BINARY_DIR ${_SB_BINARY_DIR}
#--Install step---------------
# No install step: artifacts are used from the source/build location.
INSTALL_COMMAND ""
#--Output logging-------------
LOG_DOWNLOAD OFF
LOG_CONFIGURE OFF
LOG_BUILD OFF
)

View File

@ -0,0 +1,55 @@
# External project: PCL 1.11.1, trimmed to the minimum module set ODM needs.
# Every optional module, visualizer and camera backend is switched off to
# keep build time and footprint down.
# Fix: the original passed -DBUILD_features=OFF twice; the redundant
# duplicate has been removed (last-one-wins, so behavior is unchanged).
set(_proj_name pcl)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  URL https://github.com/PointCloudLibrary/pcl/archive/refs/tags/pcl-1.11.1.zip
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    # Optional PCL modules: disabled.
    -DBUILD_features=OFF
    -DBUILD_filters=OFF
    -DBUILD_geometry=OFF
    -DBUILD_keypoints=OFF
    -DBUILD_outofcore=OFF
    -DBUILD_people=OFF
    -DBUILD_recognition=OFF
    -DBUILD_registration=OFF
    -DBUILD_sample_consensus=OFF
    -DBUILD_segmentation=OFF
    -DBUILD_surface_on_nurbs=OFF
    -DBUILD_tools=OFF
    -DBUILD_tracking=OFF
    -DBUILD_visualization=OFF
    # Rendering / GUI backends: not needed headless.
    -DWITH_OPENGL=OFF
    -DWITH_VTK=OFF
    -DWITH_QT=OFF
    # Camera / capture backends: not needed.
    -DBUILD_OPENNI=OFF
    -DBUILD_OPENNI2=OFF
    -DWITH_OPENNI=OFF
    -DWITH_OPENNI2=OFF
    -DWITH_FZAPI=OFF
    -DWITH_LIBUSB=OFF
    -DWITH_PCAP=OFF
    -DWITH_PXCAPI=OFF
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DPCL_VERBOSITY_LEVEL=Error
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
    -DPCL_BUILD_WITH_FLANN_DYNAMIC_LINKING_WIN32=ON
    ${WIN32_CMAKE_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)

View File

@ -0,0 +1,60 @@
# External project: PDAL 2.3RC1 (point-cloud processing library/tools).
# Depends on the locally built hexer and laszip projects; LASzip detection
# is forced by hand because the RC does not find the SuperBuild copy.
set(_proj_name pdal)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

if (WIN32)
  set(LASZIP_LIB "${SB_INSTALL_DIR}/lib/laszip.lib")
else()
  set(LASZIP_LIB "${SB_INSTALL_DIR}/lib/liblaszip.so")
endif()

ExternalProject_Add(${_proj_name}
  DEPENDS hexer laszip
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  URL https://github.com/PDAL/PDAL/archive/refs/tags/2.3RC1.zip
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    # Only the hexbin plugin is enabled; all database/vendor plugins are off.
    -DBUILD_PGPOINTCLOUD_TESTS=OFF
    -DBUILD_PLUGIN_PGPOINTCLOUD=OFF
    -DBUILD_PLUGIN_CPD=OFF
    -DBUILD_PLUGIN_GREYHOUND=OFF
    -DBUILD_PLUGIN_HEXBIN=ON
    -DBUILD_PLUGIN_ICEBRIDGE=OFF
    -DBUILD_PLUGIN_MRSID=OFF
    -DBUILD_PLUGIN_NITF=OFF
    -DBUILD_PLUGIN_OCI=OFF
    -DBUILD_PLUGIN_P2G=OFF
    -DBUILD_PLUGIN_SQLITE=OFF
    -DBUILD_PLUGIN_RIVLIB=OFF
    -DBUILD_PLUGIN_PYTHON=OFF
    -DWITH_ZSTD=OFF
    -DENABLE_CTEST=OFF
    -DWITH_APPS=ON
    -DWITH_LAZPERF=OFF
    -DWITH_GEOTIFF=ON
    # Point PDAL at the SuperBuild LASzip explicitly (bypasses find_package).
    -DWITH_LASZIP=ON
    -DLASZIP_FOUND=TRUE
    -DLASZIP_LIBRARIES=${LASZIP_LIB}
    -DLASZIP_VERSION=3.1.1
    -DLASZIP_INCLUDE_DIR=${SB_INSTALL_DIR}/include
    -DLASZIP_LIBRARY=${LASZIP_LIB}
    -DWITH_TESTS=OFF
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
    ${WIN32_CMAKE_ARGS}
    ${WIN32_GDAL_ARGS}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)

View File

@ -0,0 +1,36 @@
# External project: pypopsift (CUDA-accelerated SIFT for OpenSfM).
# Only built when a CUDA >= 7.0 toolkit is found; otherwise a warning is
# emitted and the pipeline falls back to CPU feature extraction.
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/pypopsift")

# Pypopsift
find_package(CUDA 7.0)

if(CUDA_FOUND)
  ExternalProject_Add(pypopsift
    DEPENDS opensfm
    PREFIX ${_SB_BINARY_DIR}
    TMP_DIR ${_SB_BINARY_DIR}/tmp
    STAMP_DIR ${_SB_BINARY_DIR}/stamp
    #--Download step--------------
    DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
    GIT_REPOSITORY https://github.com/OpenDroneMap/pypopsift
    GIT_TAG 281
    #--Update/Patch step----------
    UPDATE_COMMAND ""
    #--Configure step-------------
    SOURCE_DIR ${SB_SOURCE_DIR}/pypopsift
    CMAKE_ARGS
      # Drop the built module straight into OpenSfM's package directory.
      -DOUTPUT_DIR=${SB_INSTALL_DIR}/bin/opensfm/opensfm
      -DCMAKE_INSTALL_PREFIX=${SB_INSTALL_DIR}
      ${WIN32_CMAKE_ARGS}
      ${ARM64_CMAKE_ARGS}
    #--Build step-----------------
    BINARY_DIR ${_SB_BINARY_DIR}
    #--Install step---------------
    INSTALL_DIR ${SB_INSTALL_DIR}
    #--Output logging-------------
    LOG_DOWNLOAD OFF
    LOG_CONFIGURE OFF
    LOG_BUILD OFF
  )
else()
  message(WARNING "Could not find CUDA >= 7.0")
endif()

View File

@ -0,0 +1,29 @@
# External project: untwine (Entwine Point Tile builder), built against the
# SuperBuild PDAL.
set(_proj_name untwine)
set(_SB_BINARY_DIR "${SB_BINARY_DIR}/${_proj_name}")

ExternalProject_Add(${_proj_name}
  DEPENDS pdal
  PREFIX ${_SB_BINARY_DIR}
  TMP_DIR ${_SB_BINARY_DIR}/tmp
  STAMP_DIR ${_SB_BINARY_DIR}/stamp
  #--Download step--------------
  DOWNLOAD_DIR ${SB_DOWNLOAD_DIR}
  GIT_REPOSITORY https://github.com/OpenDroneMap/untwine/
  GIT_TAG 285
  #--Update/Patch step----------
  UPDATE_COMMAND ""
  #--Configure step-------------
  SOURCE_DIR ${SB_SOURCE_DIR}/${_proj_name}
  CMAKE_ARGS
    # Use the PDAL we just built, not any system copy.
    -DPDAL_DIR=${SB_INSTALL_DIR}/lib/cmake/PDAL
    -DCMAKE_BUILD_TYPE=${CMAKE_BUILD_TYPE}
    -DCMAKE_INSTALL_PREFIX:PATH=${SB_INSTALL_DIR}
  #--Build step-----------------
  BINARY_DIR ${_SB_BINARY_DIR}
  #--Install step---------------
  INSTALL_DIR ${SB_INSTALL_DIR}
  #--Output logging-------------
  LOG_DOWNLOAD OFF
  LOG_CONFIGURE OFF
  LOG_BUILD OFF
)

View File

@ -0,0 +1,27 @@
# Status messages shared by the macros below.
set(ADD_INTERNAL_LIB_MSG "--- Adding internal version")
set(FORCE_BUILD_LIB_MSG "force build ${ADD_INTERNAL_LIB_MSG}")

# SETUP_EXTERNAL_PROJECT(name version force_build)
# Use an EXACT-version system package when available and force_build is
# false; otherwise include the bundled External-<name>.cmake recipe.
macro(SETUP_EXTERNAL_PROJECT name version force_build)
  if(NOT ${force_build})
    find_package(${name} ${version} EXACT QUIET)

    if(${${name}_FOUND})
      message(STATUS "${name} ${${name}_VERSION} found")
      # NOTE(review): self-assignment below looks like a no-op — confirm intent.
      set(${name}_DIR ${${name}_DIR})
    else()
      message(STATUS "${name} ${version} not found ${ADD_INTERNAL_LIB_MSG}")
      include(External-${name})
    endif()
  else()
    message(STATUS "${name} ${version} ${FORCE_BUILD_LIB_MSG}")
    include(External-${name})
  endif()
endmacro()

# SETUP_EXTERNAL_PROJECT_CUSTOM(name)
# Always build the bundled recipe; never probe for a system package.
macro(SETUP_EXTERNAL_PROJECT_CUSTOM name)
  message(STATUS "${name} ${FORCE_BUILD_LIB_MSG}")
  include(External-${name})
endmacro()

1
o/ODM/ODM-2.8.7/VERSION Normal file
View File

@ -0,0 +1 @@
2.8.7

View File

@ -0,0 +1 @@
See https://github.com/OpenDroneMap/documents/blob/master/CONDUCT.md

View File

@ -0,0 +1,210 @@
# ODM Windows configure script: platform/interpreter guards, then CLI setup.
import sys, platform

# This script drives the Windows build only; POSIX users run configure.sh.
if sys.platform != 'win32':
    print("This script is for Windows only! Use configure.sh instead.")
    exit(1)

# The pinned requirements.txt only resolves under Python 3.8.
if sys.version_info.major != 3 or sys.version_info.minor != 8:
    print("You need to use Python 3.8.x (due to the requirements.txt). You are using %s instead." % platform.python_version())
    exit(1)

import argparse
import subprocess
import os
import stat
import urllib.request
import shutil
import zipfile
from venv import EnvBuilder

# Command-line interface: one positional action plus vcpkg/signing options.
parser = argparse.ArgumentParser(description='ODM Windows Configure Script')
parser.add_argument('action',
    type=str,
    choices=["build", "clean", "dist", "vcpkg_export"],
    help='Action: %(choices)s')
parser.add_argument('--build-vcpkg',
    type=bool,
    help='Build VCPKG environment from scratch instead of downloading prebuilt one.')
parser.add_argument('--vcpkg-archive-url',
    type=str,
    default='https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/vcpkg-export-250.zip',
    required=False,
    help='Path to VCPKG export archive')
parser.add_argument('--code-sign-cert-path',
    type=str,
    default='',
    required=False,
    help='Path to pfx code signing certificate')
args = parser.parse_args()
def run(cmd, cwd=None):
    """Run *cmd* through the shell, echoing it first; raise on non-zero exit.

    cwd defaults to the current working directory *at call time*. The
    original default of ``os.getcwd()`` was evaluated once at import time,
    so it silently went stale if the process later changed directory.

    :param cmd: shell command line to execute
    :param cwd: working directory for the child process (None = current)
    :raises Exception: when the command exits with a non-zero status
    """
    if cwd is None:
        cwd = os.getcwd()
    env = os.environ.copy()
    print(cmd)
    p = subprocess.Popen(cmd, shell=True, env=env, cwd=cwd)
    retcode = p.wait()
    if retcode != 0:
        raise Exception("Command returned %s" % retcode)
# https://izziswift.com/shutil-rmtree-fails-on-windows-with-access-is-denied/
def rmtree(top):
    """Recursively delete directory *top*, clearing read-only attributes.

    Plain shutil.rmtree fails on Windows with "Access is denied" when a
    file (or directory) is read-only. Instead of pre-chmodding every file
    like the original hand-rolled walk, use shutil.rmtree with an onerror
    hook that makes the offending path writable and retries — this also
    covers read-only *directories*, which the original os.rmdir calls
    would have tripped over.

    :param top: path of the directory tree to remove
    """
    def _make_writable_and_retry(func, path, exc_info):
        os.chmod(path, stat.S_IWUSR)
        func(path)

    shutil.rmtree(top, onerror=_make_writable_and_retry)
def vcpkg_requirements():
    """Return the vcpkg package names listed in vcpkg-requirements.txt.

    Lines are whitespace-stripped; blank lines are dropped.
    """
    with open("vcpkg-requirements.txt") as reqs:
        return [entry.strip() for entry in reqs.read().split("\n") if entry.strip()]
def build():
    """Set up the Windows build environment and compile the SuperBuild.

    Steps: create a Python venv with the pinned requirements, obtain a
    vcpkg environment (prebuilt download by default), then configure and
    build SuperBuild with the vcpkg CMake toolchain.
    """
    # Create python virtual env
    if not os.path.isdir("venv"):
        print("Creating virtual env --> venv/")
        ebuilder = EnvBuilder(with_pip=True)
        ebuilder.create("venv")
        run("venv\\Scripts\\pip install --ignore-installed -r requirements.txt")

    # Download / build VCPKG environment
    if not os.path.isdir("vcpkg"):
        if args.build_vcpkg:
            # Building vcpkg from scratch is not implemented yet.
            print("TODO")
            # git clone vcpkg repo
            # bootstrap
            # install requirements
        else:
            if not os.path.exists("vcpkg-env.zip"):
                print("Downloading %s" % args.vcpkg_archive_url)
                with urllib.request.urlopen(args.vcpkg_archive_url) as response, open( "vcpkg-env.zip", 'wb') as out_file:
                    shutil.copyfileobj(response, out_file)
            if not os.path.exists("vcpkg"):
                print("Extracting vcpkg-env.zip --> vcpkg/")
                with zipfile.ZipFile("vcpkg-env.zip") as z:
                    # The archive is expected to contain a single top-level
                    # directory, which is renamed to vcpkg/ after extraction.
                    top_dir = z.namelist()[0]
                    z.extractall(".")

                    if os.path.exists(top_dir):
                        os.rename(top_dir, "vcpkg")
                    else:
                        print("Warning! Something looks wrong in the VCPKG archive... check the vcpkg/ directory.")
            # Remove the archive once extracted.
            safe_remove("vcpkg-env.zip")

    if not os.path.exists(os.path.join("SuperBuild", "build")) or not os.path.exists(os.path.join("SuperBuild", "install")):
        print("Compiling SuperBuild")

        build_dir = os.path.join("SuperBuild", "build")
        if not os.path.isdir(build_dir):
            os.mkdir(build_dir)

        toolchain_file = os.path.join(os.getcwd(), "vcpkg", "scripts", "buildsystems", "vcpkg.cmake")
        run("cmake .. -DCMAKE_TOOLCHAIN_FILE=\"%s\"" % toolchain_file, cwd=build_dir)
        run("cmake --build . --config Release", cwd=build_dir)
def vcpkg_export():
    """Export the installed vcpkg packages as a redistributable zip archive.

    The archive name embeds the ODM version with dots stripped,
    e.g. vcpkg-export-287.zip for version 2.8.7.
    """
    if not os.path.exists("vcpkg"):
        print("vcpkg directory does not exist. Did you build the environment?")
        exit(1)

    pkgs = vcpkg_requirements()
    out = "vcpkg-export-%s" % odm_version().replace(".", "")
    run("vcpkg\\vcpkg export %s --output=%s --zip" % (" ".join(pkgs), out))
def odm_version():
    """Return the ODM version string: the first line of the VERSION file."""
    with open("VERSION") as version_file:
        return version_file.readline().strip()
def safe_remove(path):
    """Delete *path*, whether file or directory tree; do nothing if absent."""
    if os.path.isfile(path):
        os.remove(path)
    elif os.path.isdir(path):
        rmtree(path)
def clean():
    """Delete everything configure.py can create, restoring a pristine tree."""
    safe_remove("vcpkg-download.zip")
    # Fix: build() downloads the archive as "vcpkg-env.zip" and only removes
    # it on success; the original clean() removed a never-created
    # "vcpkg-download.zip" but left vcpkg-env.zip behind after failed builds.
    safe_remove("vcpkg-env.zip")
    safe_remove("vcpkg")
    safe_remove("venv")
    safe_remove(os.path.join("SuperBuild", "build"))
    safe_remove(os.path.join("SuperBuild", "download"))
    safe_remove(os.path.join("SuperBuild", "src"))
    safe_remove(os.path.join("SuperBuild", "install"))
def dist():
    """Build the Windows installer with InnoSetup.

    Downloads supporting binaries (VC++ runtime, portable Python 3.8,
    signtool, portable InnoSetup) into SuperBuild/download as needed, then
    runs the InnoSetup compiler, optionally code-signing with the pfx
    certificate given via --code-sign-cert-path.
    """
    if not os.path.exists("SuperBuild\\download"):
        print("You need to run configure.py build before you can run dist")
        exit(1)

    # Download VC++ runtime
    vcredist_path = os.path.join("SuperBuild", "download", "vc_redist.x64.zip")
    if not os.path.isfile(vcredist_path):
        vcredist_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/VC_redist.x64.zip"
        print("Downloading %s" % vcredist_url)
        with urllib.request.urlopen(vcredist_url) as response, open(vcredist_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)

        print("Extracting --> vc_redist.x64.exe")
        with zipfile.ZipFile(vcredist_path) as z:
            z.extractall(os.path.join("SuperBuild", "download"))

    # Download portable python
    if not os.path.isdir("python38"):
        pythonzip_path = os.path.join("SuperBuild", "download", "python38.zip")
        python_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/python-3.8.1-embed-amd64-less-pth.zip"
        if not os.path.exists(pythonzip_path):
            print("Downloading %s" % python_url)
            with urllib.request.urlopen(python_url) as response, open( pythonzip_path, 'wb') as out_file:
                shutil.copyfileobj(response, out_file)

        os.mkdir("python38")

        print("Extracting --> python38/")
        with zipfile.ZipFile(pythonzip_path) as z:
            z.extractall("python38")

    # Download signtool
    signtool_path = os.path.join("SuperBuild", "download", "signtool.exe")
    signtool_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/signtool.exe"
    if not os.path.exists(signtool_path):
        print("Downloading %s" % signtool_url)
        with urllib.request.urlopen(signtool_url) as response, open(signtool_path, 'wb') as out_file:
            shutil.copyfileobj(response, out_file)

    # Download innosetup
    if not os.path.isdir("innosetup"):
        innosetupzip_path = os.path.join("SuperBuild", "download", "innosetup.zip")
        innosetup_url = "https://github.com/OpenDroneMap/windows-deps/releases/download/2.5.0/innosetup-portable-win32-6.0.5-3.zip"
        if not os.path.exists(innosetupzip_path):
            print("Downloading %s" % innosetup_url)
            with urllib.request.urlopen(innosetup_url) as response, open(innosetupzip_path, 'wb') as out_file:
                shutil.copyfileobj(response, out_file)

        os.mkdir("innosetup")

        print("Extracting --> innosetup/")
        with zipfile.ZipFile(innosetupzip_path) as z:
            z.extractall("innosetup")

    # Run
    cs_flags = ""
    if args.code_sign_cert_path:
        # Pass a signtool invocation through InnoSetup's /S flag syntax.
        cs_flags = '"/Ssigntool=%s sign /f %s /fd SHA1 /t http://timestamp.sectigo.com $f"' % (signtool_path, args.code_sign_cert_path)
    run("innosetup\\iscc /Qp " + cs_flags + " \"innosetup.iss\"")
    print("Done! Setup created in dist/")
# Dispatch the requested action. argparse's `choices` already rejects unknown
# values, so the final branch is defensive only.
if args.action == 'build':
    build()
elif args.action == 'vcpkg_export':
    vcpkg_export()
elif args.action == 'dist':
    dist()
elif args.action == 'clean':
    clean()
else:
    # Bug fix: argparse.Namespace has no print_help(); it lives on the parser,
    # so the original `args.print_help()` would have raised AttributeError.
    parser.print_help()
    exit(1)

217
o/ODM/ODM-2.8.7/configure.sh Executable file
View File

@ -0,0 +1,217 @@
#!/bin/bash

# Ensure the DEBIAN_FRONTEND environment variable is set for apt-get calls
# (APT_GET is used everywhere below so installs never prompt interactively).
APT_GET="env DEBIAN_FRONTEND=noninteractive $(command -v apt-get)"
# Abort unless we are on an Ubuntu release this script supports (20.04/21.04).
check_version(){
    UBUNTU_VERSION=$(lsb_release -r)
    case "$UBUNTU_VERSION" in
        *"20.04"*|*"21.04"*)
            echo "Ubuntu: $UBUNTU_VERSION, good!"
            ;;
        *"18.04"*|*"16.04"*)
            # Known-old releases get pointers at the last compatible ODM.
            echo "ODM 2.1 has upgraded to Ubuntu 21.04, but you're on $UBUNTU_VERSION"
            echo "* The last version of ODM that supports Ubuntu 16.04 is v1.0.2."
            echo "* The last version of ODM that supports Ubuntu 18.04 is v2.0.0."
            echo "We recommend you to upgrade, or better yet, use docker."
            exit 1
            ;;
        *)
            echo "You are not on Ubuntu 21.04 (detected: $UBUNTU_VERSION)"
            echo "It might be possible to run ODM on a newer version of Ubuntu, however, you cannot rely on this script."
            exit 1
            ;;
    esac
}
# Second CLI argument optionally overrides the make -j parallelism;
# default to the machine's core count.
if [[ $2 =~ ^[0-9]+$ ]] ; then
    processes=$2
else
    processes=$(nproc)
fi
# Install the bootstrap tools this script itself depends on (sudo,
# lsb_release, pkg-config, tzdata, pip/shyaml) and enable the UbuntuGIS
# PPA on 20.04.
ensure_prereqs() {
    export DEBIAN_FRONTEND=noninteractive

    # May be running as root inside a container where sudo is absent.
    if ! command -v sudo &> /dev/null; then
        echo "Installing sudo"
        $APT_GET update
        $APT_GET install -y -qq --no-install-recommends sudo
    else
        sudo $APT_GET update
    fi

    if ! command -v lsb_release &> /dev/null; then
        echo "Installing lsb_release"
        sudo $APT_GET install -y -qq --no-install-recommends lsb-release
    fi

    if ! command -v pkg-config &> /dev/null; then
        echo "Installing pkg-config"
        sudo $APT_GET install -y -qq --no-install-recommends pkg-config
    fi

    echo "Installing tzdata"
    sudo $APT_GET install -y -qq tzdata

    # 20.04 gets newer GIS packages from the UbuntuGIS unstable PPA.
    UBUNTU_VERSION=$(lsb_release -r)
    if [[ "$UBUNTU_VERSION" == *"20.04"* ]]; then
        echo "Enabling PPA for Ubuntu GIS"
        sudo $APT_GET install -y -qq --no-install-recommends software-properties-common
        sudo add-apt-repository -y ppa:ubuntugis/ubuntugis-unstable
        sudo $APT_GET update
    fi

    echo "Installing Python PIP"
    sudo $APT_GET install -y -qq --no-install-recommends \
        python3-pip \
        python3-setuptools
    sudo pip3 install -U pip
    # shyaml is used below to read package lists out of snapcraft.yaml.
    sudo pip3 install -U shyaml
}
# Save all dependencies in snapcraft.yaml to maintain a single source of truth.
# Maintaining multiple lists will otherwise be painful.
# Args: $1 = "build" or "runtime" (selects build-packages vs stage-packages),
#       $2 = the snapcraft "part" whose package list to install.
installdepsfromsnapcraft() {
    section="$2"
    case "$1" in
        build) key=build-packages; ;;
        runtime) key=stage-packages; ;;
        *) key=build-packages; ;; # shouldn't be needed, but it's here just in case
    esac

    # 21.04 uses a separate snapcraft manifest.
    UBUNTU_VERSION=$(lsb_release -r)
    SNAPCRAFT_FILE="snapcraft.yaml"
    if [[ "$UBUNTU_VERSION" == *"21.04"* ]]; then
        SNAPCRAFT_FILE="snapcraft21.yaml"
    fi

    # Pull the NUL-delimited package list out of the YAML and apt-install it.
    cat snap/$SNAPCRAFT_FILE | \
        shyaml get-values-0 parts.$section.$key | \
        xargs -0 sudo $APT_GET install -y -qq --no-install-recommends
}
# Install only the runtime (stage) packages — used by docker image builds
# where compilation happens in a separate stage.
installruntimedepsonly() {
    echo "Installing runtime dependencies"

    ensure_prereqs
    check_version

    echo "Installing Required Requisites"
    installdepsfromsnapcraft runtime prereqs
    echo "Installing OpenCV Dependencies"
    installdepsfromsnapcraft runtime opencv
    echo "Installing OpenSfM Dependencies"
    installdepsfromsnapcraft runtime opensfm
    echo "Installing OpenMVS Dependencies"
    installdepsfromsnapcraft runtime openmvs
}
# Install everything needed to build ODM: apt build packages (from the
# snapcraft manifests) plus the pinned Python requirements.
installreqs() {
    cd /code

    ## Set up library paths
    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$RUNPATH/SuperBuild/install/lib

    ## Before installing
    echo "Updating the system"
    ensure_prereqs
    check_version

    echo "Installing Required Requisites"
    installdepsfromsnapcraft build prereqs
    echo "Installing OpenCV Dependencies"
    installdepsfromsnapcraft build opencv
    echo "Installing OpenSfM Dependencies"
    installdepsfromsnapcraft build opensfm
    echo "Installing OpenMVS Dependencies"
    installdepsfromsnapcraft build openmvs

    # Fail hard if the Python requirements cannot be installed.
    set -e
    pip install --ignore-installed -r requirements.txt
    #if [ ! -z "$GPU_INSTALL" ]; then
    #fi
    set +e
}
# Full source install: dependencies, optional portable gcc/g++ wrapper
# scripts, then the CMake SuperBuild compilation.
install() {
    installreqs

    # PORTABLE_INSTALL swaps the real compilers for wrapper scripts
    # (shipped in ./docker) that produce more relocatable binaries.
    if [ ! -z "$PORTABLE_INSTALL" ]; then
        echo "Replacing g++ and gcc with our scripts for portability..."
        if [ ! -e /usr/bin/gcc_real ]; then
            sudo mv -v /usr/bin/gcc /usr/bin/gcc_real
            sudo cp -v ./docker/gcc /usr/bin/gcc
        fi
        if [ ! -e /usr/bin/g++_real ]; then
            sudo mv -v /usr/bin/g++ /usr/bin/g++_real
            sudo cp -v ./docker/g++ /usr/bin/g++
        fi
    fi

    set -eo pipefail

    echo "Compiling SuperBuild"
    cd ${RUNPATH}/SuperBuild
    mkdir -p build && cd build
    # $processes comes from the optional second CLI argument (defaults to nproc).
    cmake .. && make -j$processes

    echo "Configuration Finished"
}
# Remove all SuperBuild outputs and the top-level build directory.
# Installed apt/pip dependencies are left alone.
uninstall() {
    check_version

    echo "Removing SuperBuild and build directories"
    cd ${RUNPATH}/SuperBuild
    rm -rfv build src download install
    cd ../
    rm -rfv build
}
# Wipe and rebuild the ODM modules. Note: does NOT update the source tree.
reinstall() {
    check_version

    echo "Reinstalling ODM modules"
    uninstall
    install
}
# Remove build intermediates while keeping the installed artifacts.
clean() {
    rm -rf \
        ${RUNPATH}/SuperBuild/build \
        ${RUNPATH}/SuperBuild/download \
        ${RUNPATH}/SuperBuild/src

    # find in /code and delete static libraries and intermediate object files
    find ${RUNPATH} -type f -name "*.a" -delete -or -type f -name "*.o" -delete
}
# Print the supported subcommands and their descriptions.
usage() {
    echo "Usage:"
    echo "bash configure.sh <install|update|uninstall|installreqs|help> [nproc]"
    echo "Subcommands:"
    echo "  install"
    echo "    Installs all dependencies and modules for running OpenDroneMap"
    echo "  installruntimedepsonly"
    echo "    Installs *only* the runtime libraries (used by docker builds). To build from source, use the 'install' command."
    echo "  reinstall"
    echo "    Removes SuperBuild and build modules, then re-installs them. Note this does not update OpenDroneMap to the latest version. "
    echo "  uninstall"
    echo "    Removes SuperBuild and build modules. Does not uninstall dependencies"
    echo "  installreqs"
    echo "    Only installs the requirements (does not build SuperBuild)"
    echo "  clean"
    echo "    Cleans the SuperBuild directory by removing temporary files. "
    echo "  help"
    echo "    Displays this message"
    echo "[nproc] is an optional argument that can set the number of processes for the make -j tag. By default it uses $(nproc)"
}
# First argument selects the subcommand; the shell function of the same
# name is invoked with RUNPATH set to this script's directory.
# NOTE(review): "help" is advertised by usage() but not accepted by this
# regex (it falls through to the error branch) — confirm whether intended.
if [[ $1 =~ ^(install|installruntimedepsonly|reinstall|uninstall|installreqs|clean)$ ]]; then
    RUNPATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
    "$1"
else
    echo "Invalid instructions." >&2
    usage
    exit 1
fi

View File

@ -0,0 +1,8 @@
@echo off
REM Open an "ODM Console" window: print the ODM ASCII-art banner and the
REM version read from the VERSION file, then show `run --help`.
REM win32env.bat is expected to set up the environment (PATH etc.) for ODM.
setlocal
call win32env.bat
start "ODM Console" cmd /k "echo ____________________________ && echo / ____ _____ __ __ \ && echo ^| / __ \ ^| __ \ ^| \/ ^| ^| && echo ^| ^| ^| ^| ^| ^| ^| ^| ^| ^| \ / ^| ^| && echo ^| ^| ^| ^| ^| ^| ^| ^| ^| ^| ^|\/^| ^| ^| && echo ^| ^| ^|__^| ^| ^| ^|__^| ^| ^| ^| ^| ^| ^| && echo ^| \____/ ^|_____/ ^|_^| ^|_^| ^| && echo \____________________________/ && @echo off && FOR /F %%i in (VERSION) do echo version: %%i && @echo on && echo. && run --help"
endlocal

View File

@ -0,0 +1,41 @@
# Blender scripts
# odm_photo
Renders photos from ODM generated texture models.
Currently can produce 360 panoramic photos and 360 3D panoramic (VR) photos.
NB: the default resolution for 360 photos is 6000x3000 (maximum supported by Facebook).
## Requirements
* Blender
* ExifTool (must be on your PATH)
## Usage
To generate a 360 panoramic photo:
blender -b photo_360.blend --python odm_photo.py -- <project-path>
Output is `<project-path>/odm_photo/odm_photo_360.jpg`.
To generate a 360 3D panoramic photo:
blender -b photo_vr.blend --python odm_photo.py -- <project-path>
Output is `<project-path>/odm_photo/odm_photo_vr_L.jpg` and `<project-path>/odm_photo/odm_photo_vr_R.jpg`.
**NB: argument order matters!**
# odm_video
Renders videos from ODM generated texture models.
Currently can produce 360 panoramic videos.
NB: the default resolution is 4096x2048 (maximum supported by Facebook).
## Requirements
* Blender
* Python 2.7 (must be on your PATH)
* Spatial Media Metadata Injector (https://github.com/google/spatial-media/tree/master/spatialmedia) (place in `spatialmedia` subdirectory)
## Usage
To generate a 360 panoramic video:
blender -b photo_360.blend --python odm_video.py -- <project-path> <camera-waypoints.xyz> <number-of-frames>
Output is `<project-path>/odm_video/odm_video_360.mp4`.

View File

@ -0,0 +1,45 @@
import bpy
import materials_utils
def loadMesh(file):
    """Import an ODM textured .obj into the Blender scene and rewire its
    materials to emission shaders (so the texture renders at full brightness).

    Also lifts the model by half its bounding-box height and registers the
    materials_utils addon, which the cycles conversion operator requires.

    :param file: path to the textured .obj to import
    """
    bpy.utils.register_module('materials_utils')

    bpy.ops.import_scene.obj(filepath=file,
                             axis_forward='Y',
                             axis_up='Z')

    bpy.ops.xps_tools.convert_to_cycles_all()

    # NOTE(review): assumes the imported model ends up last in
    # bpy.data.objects — confirm for multi-object .obj files.
    model = bpy.data.objects[-1]

    # Axis-aligned bounding box of the imported model.
    minX = float('inf')
    maxX = float('-inf')
    minY = float('inf')
    maxY = float('-inf')
    minZ = float('inf')
    maxZ = float('-inf')
    for coord in model.bound_box:
        x = coord[0]
        y = coord[1]
        z = coord[2]
        minX = min(x, minX)
        maxX = max(x, maxX)
        minY = min(y, minY)
        maxY = max(y, maxY)
        minZ = min(z, minZ)
        maxZ = max(z, maxZ)

    # Raise the model by half its height.
    model.location[2] += (maxZ - minZ)/2

    # Replace each material's diffuse node setup with an emission shader
    # fed directly by the diffuse texture.
    surfaceShaderType = 'ShaderNodeEmission'
    surfaceShaderName = 'Emission'

    for m in bpy.data.materials:
        nt = m.node_tree
        nt.nodes.remove(nt.nodes['Color Mult'])
        nt.nodes.remove(nt.nodes['Diffuse BSDF'])
        nt.nodes.new(surfaceShaderType)
        nt.links.new(nt.nodes['Material Output'].inputs[0],
                     nt.nodes[surfaceShaderName].outputs[0])
        nt.links.new(nt.nodes[surfaceShaderName].inputs[0],
                     nt.nodes['Diffuse Texture'].outputs[0])

View File

@ -0,0 +1,65 @@
#!/usr/bin/env python
# Renders a photo.
# ExifTool must be on your PATH.
# To generate a 360 panoramic photo:
# blender -b photo_360.blend --python odm_photo.py -- <project-path>
# To generate a 360 3D panoramic photo:
# blender -b photo_vr.blend --python odm_photo.py -- <project-path>
# NB: argument order matters!
import sys
import bpy
import subprocess
from common import loadMesh
def main():
    """Render the pre-configured .blend camera over the ODM textured model
    and stamp the resulting jpg(s) with 360-photo metadata.

    Invoked as: blender -b <blend> --python odm_photo.py -- <project-path>
    (Blender passes its own args first; ours follow the '--' separator.)
    """
    if len(sys.argv) < 5 or sys.argv[-2] != '--':
        sys.exit('Please provide the ODM project path.')

    projectHome = sys.argv[-1]

    loadMesh(projectHome +
             '/odm_texturing/odm_textured_model_geo.obj')

    # Output name is derived from the .blend file (photo_360 / photo_vr).
    blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
    fileName = projectHome + '/odm_photo/odm_' + blendName
    render = bpy.data.scenes['Scene'].render
    render.filepath = fileName
    bpy.ops.render.render(write_still=True)

    width = render.resolution_x
    height = render.resolution_y
    if(render.use_multiview):
        # Stereo (VR) render: one image per view, distinguished by suffix.
        writeExif(fileName+render.views[0].file_suffix+'.jpg', width, height)
        writeExif(fileName+render.views[1].file_suffix+'.jpg', width, height)
    else:
        writeExif(fileName+'.jpg', width, height)
def writeExif(fileName, width, height):
    """Stamp 360-panorama (GPano) metadata onto *fileName* via ExifTool.

    ExifTool must be available on PATH. The image is rewritten in place
    (-overwrite_original).

    :param fileName: jpg file to tag
    :param width: rendered image width in pixels
    :param height: rendered image height in pixels
    """
    w, h = str(width), str(height)

    # Full equirectangular panorama: cropped area == full area, no offsets.
    pano_tags = [
        '-CroppedAreaImageWidthPixels=' + w,
        '-CroppedAreaImageHeightPixels=' + h,
        '-FullPanoWidthPixels=' + w,
        '-FullPanoHeightPixels=' + h,
        '-CroppedAreaLeftPixels=0',
        '-CroppedAreaTopPixels=0',
        '-ProjectionType=equirectangular',
        '-UsePanoramaViewer=True',
        '-PoseHeadingDegrees=0',
        '-LargestValidInteriorRectLeft=0',
        '-LargestValidInteriorRectTop=0',
        '-LargestValidInteriorRectWidth=' + w,
        '-LargestValidInteriorRectHeight=' + h,
    ]
    subprocess.run(['exiftool', '-overwrite_original'] + pano_tags + [fileName])
# Entry point when executed via `blender --python odm_photo.py`.
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,113 @@
#!/usr/bin/env python
# Renders a video.
# To generate a 360 panoramic video:
# blender -b photo_360.blend --python odm_video.py -- <project-path> <camera-waypoints.xyz> <number-of-frames>
import sys
import subprocess
import os
import bpy
from common import loadMesh
def main():
    """Render a (possibly stereo) 360 video of the ODM textured model along
    a camera path defined by waypoints, then inject spherical-video metadata.

    Invoked as: blender -b photo_360.blend --python odm_video.py --
    <project-path> <camera-waypoints.xyz> <number-of-frames>
    """
    if len(sys.argv) < 7 or sys.argv[-4] != '--':
        sys.exit('Please provide the ODM project path, camera waypoints (xyz format), and number of frames.')

    projectHome = sys.argv[-3]
    waypointFile = sys.argv[-2]
    numFrames = int(sys.argv[-1])

    loadMesh(projectHome +
             '/odm_texturing/odm_textured_model_geo.obj')

    waypoints = loadWaypoints(waypointFile)
    numWaypoints = len(waypoints)

    scene = bpy.data.scenes['Scene']

    # create path thru waypoints
    curve = bpy.data.curves.new(name='CameraPath', type='CURVE')
    curve.dimensions = '3D'
    curve.twist_mode = 'Z_UP'
    nurbs = curve.splines.new('NURBS')
    nurbs.points.add(numWaypoints-1)
    weight = 1
    for i in range(numWaypoints):
        nurbs.points[i].co[0] = waypoints[i][0]
        nurbs.points[i].co[1] = waypoints[i][1]
        nurbs.points[i].co[2] = waypoints[i][2]
        nurbs.points[i].co[3] = weight
    nurbs.use_endpoint_u = True
    path = bpy.data.objects.new(name='CameraPath', object_data=curve)
    scene.objects.link(path)

    # Park the camera at the origin and constrain it to follow the path
    # over the course of the animation.
    camera = bpy.data.objects['Camera']
    camera.location[0] = 0
    camera.location[1] = 0
    camera.location[2] = 0
    followPath = camera.constraints.new(type='FOLLOW_PATH')
    followPath.name = 'CameraFollowPath'
    followPath.target = path
    followPath.use_curve_follow = True
    animateContext = bpy.context.copy()
    animateContext['constraint'] = followPath
    bpy.ops.constraint.followpath_path_animate(animateContext,
                                               constraint='CameraFollowPath',
                                               frame_start=0,
                                               length=numFrames)

    # Output name is derived from the .blend file, photo_* -> video_*.
    blendName = bpy.path.display_name_from_filepath(bpy.data.filepath)
    fileName = projectHome + '/odm_video/odm_' + blendName.replace('photo', 'video')
    scene.frame_start = 0
    scene.frame_end = numFrames
    render = scene.render
    render.filepath = fileName + '.mp4'
    render.image_settings.file_format = 'FFMPEG'
    if(render.use_multiview):
        # Stereo render: stack both views top-bottom in a single 3D video.
        render.image_settings.stereo_3d_format.display_mode = 'TOPBOTTOM'
        render.image_settings.views_format = 'STEREO_3D'
        render.views[0].file_suffix = ''
        format3d = 'top-bottom'
    else:
        # NOTE(review): width/height are captured but never used — confirm.
        width = render.resolution_x
        height = render.resolution_y
        format3d = 'none'
    # 4096x2048 is the default/maximum 360-video resolution (per README).
    render.resolution_x = 4096
    render.resolution_y = 2048

    render.ffmpeg.audio_codec = 'AAC'
    render.ffmpeg.codec = 'H264'
    render.ffmpeg.format = 'MPEG4'
    render.ffmpeg.video_bitrate = 45000
    bpy.ops.render.render(animation=True)

    writeMetadata(fileName+'.mp4', format3d)
def loadWaypoints(filename):
    """Read camera waypoints from an xyz-format text file.

    Each data line holds at least three whitespace-separated floats
    (x, y, z); any extra fields are ignored. Blank or short lines are
    skipped — the original raised IndexError on the trailing blank line
    many editors append to text files.

    :param filename: path to the waypoint file
    :return: list of (x, y, z) float tuples in file order
    """
    waypoints = []
    with open(filename) as f:
        for line in f:
            xyz = line.split()
            if len(xyz) < 3:
                continue  # skip blank or malformed lines
            waypoints.append((float(xyz[0]), float(xyz[1]), float(xyz[2])))
    return waypoints
def writeMetadata(filename, format3d):
    """Inject spherical-video (spatial media) metadata into the mp4.

    Runs Google's spatialmedia injector (expected in a `spatialmedia`
    subdirectory, run with Python from PATH); on success the injected copy
    replaces the original file.

    :param filename: path to the rendered mp4
    :param format3d: stereo layout for the injector ('none' or 'top-bottom')
    """
    subprocess.run(['python',
                    'spatialmedia',
                    '-i',
                    '--stereo='+format3d,
                    filename,
                    filename+'.injected'])

    # check metadata injector was successful
    if os.path.exists(filename+'.injected'):
        os.remove(filename)
        os.rename(filename+'.injected', filename)
# Entry point when executed via `blender --python odm_video.py`.
if __name__ == '__main__':
    main()

Binary file not shown.

Binary file not shown.

View File

@ -0,0 +1,16 @@
# GRASS scripts
# odm_grass
Generates contour and textured relief maps.
## Requirements
* Compile and install GRASS version 7 or higher, https://grasswiki.osgeo.org/wiki/Compile_and_Install
* Environment variables:
* PYTHONHOME set to the location of Python
* PYTHONPATH set to the location of GRASS Python libs
* PATH includes GRASS bin and lib directories
* GISBASE set to the location of GRASS
## Usage
python odm_grass.py <project-path>
Output is `<project-path>/odm_georeferencing/odm_contour.shp` and `<project-path>/odm_orthophoto/odm_relief.tif`.

View File

@ -0,0 +1,149 @@
#!/usr/bin/env python
# To run, set the following env variables:
# PYTHONHOME location of Python
# PYTHONPATH location of GRASS Python libs
# PATH include GRASS bin and lib
# GISBASE location of GRASS
import os
import sys
import grass.script as gscript
import grass.script.core
import grass.script.setup
# GRASS map/layer names used throughout this script.
rsurfName = 'odm_rsurf'        # DEM raster imported from the ODM project
contourName = 'odm_contour'    # vector contours derived from the DEM
orthophotoName = 'odm_orthophoto'
reliefName = 'odm_relief'
shadedReliefName = reliefName + '_shaded'
overwrite = True               # overwrite existing GRASS maps on re-run
def main():
    """Create a GRASS location sized to the ODM project's extents, then
    generate the contour and relief products.

    Usage: python odm_grass.py <project-path>
    """
    if len(sys.argv) < 2:
        sys.exit('Please provide the ODM project path.')

    projectHome = sys.argv[1]
    gisdb = projectHome+'/grassdata'
    location = 'odm'
    gisrc = gscript.setup.init(os.environ['GISBASE'], gisdb, location)

    # get srs and initial extents
    # coords.txt layout: line 1 = srs (datum proj zone), line 2 = mean x/y,
    # remaining lines = x/y offsets relative to that mean.
    with open(projectHome+'/odm_georeferencing/coords.txt') as f:
        srs = f.readline().split()
        mean = f.readline().split()
        meanX = float(mean[0])
        meanY = float(mean[1])
        minX = float('inf')
        maxX = float('-inf')
        minY = float('inf')
        maxY = float('-inf')
        for line in f:
            xy = line.split()
            x = float(xy[0])
            y = float(xy[1])
            minX = min(x, minX)
            maxX = max(x, maxX)
            minY = min(y, minY)
            maxY = max(y, maxY)

    datum = srs[0]
    proj = srs[1]
    zone = srs[2]
    gscript.core.create_location(gisdb, location, datum=datum,
                                 proj4='+proj='+proj+' +zone='+zone,
                                 overwrite=overwrite)

    # Region bounds: add the mean back to the offsets; 1 cm resolution.
    n = meanY + maxY
    s = meanY + minY
    e = meanX + maxX
    w = meanX + minX
    gscript.run_command('g.region', flags='s', n=n, s=s, e=e, w=w, res=0.01,
                        res3=0.01, overwrite=overwrite)

    contour(projectHome)
    relief(projectHome)

    # Remove the temporary GISRC session file created by setup.init.
    os.remove(gisrc)
def contour(projectHome):
    """
    Creates a contour map based on the ODM project DEM model.

    Imports the DEM GeoTIFF, contours it at a 0.25 unit interval, and
    exports the result as a shapefile under odm_georeferencing/.
    """
    # Fix: `print '...'` is Python-2-only syntax and is a SyntaxError under
    # the python3 that `#!/usr/bin/env python` resolves to on modern
    # systems; the function form is valid on both 2 and 3.
    print('Creating contour map')
    step = 0.25

    gscript.run_command('r.in.gdal', flags='o',
                        input=projectHome+'/odm_georeferencing/odm_georeferencing_model_dem.tif',
                        output=rsurfName, memory=2047,
                        overwrite=overwrite)

    gscript.run_command('r.contour', input=rsurfName, output=contourName,
                        step=step, overwrite=overwrite)

    gscript.run_command('v.out.ogr', input=contourName,
                        output=projectHome +
                        '/odm_georeferencing/odm_contour.shp',
                        overwrite=overwrite)
def relief(projectHome):
    """
    Creates a textured relief map in GeoTIFF format.
    NB: this is an RGBA raster and so is readable by image software.

    Combines the orthophoto's RGB bands with a hillshade of the DEM and
    writes the result to odm_orthophoto/odm_relief.tif.
    """
    # Fix: Python-2-only print statement replaced with the function form
    # (valid under both Python 2 and 3); see contour() for details.
    print('Creating relief map')

    gscript.run_command('r.in.gdal', flags='o',
                        input=projectHome+'/odm_orthophoto/odm_orthophoto.tif',
                        output=orthophotoName, memory=2047,
                        overwrite=overwrite)

    # Recombine the imported .red/.green/.blue bands into one RGB map.
    gscript.run_command('r.composite', red=orthophotoName+'.red',
                        green=orthophotoName+'.green',
                        blue=orthophotoName+'.blue',
                        output=orthophotoName+'.rgb',
                        overwrite=overwrite)

    gscript.run_command('r.relief', input=rsurfName, output=reliefName,
                        overwrite=overwrite)

    gscript.run_command('r.shade', shade=reliefName,
                        color=orthophotoName+'.rgb', output=shadedReliefName,
                        overwrite=overwrite)

    # Zero out pixels where the orthophoto has no data; alpha=255 elsewhere.
    calc = ';'.join([
        '$shadedRelief.red = ' +
        'if(isnull($orthophoto.red), 0, r#$shadedRelief)',
        '$shadedRelief.green = ' +
        'if(isnull($orthophoto.green), 0, g#$shadedRelief)',
        '$shadedRelief.blue = ' +
        'if(isnull($orthophoto.blue), 0, b#$shadedRelief)',
        '$shadedRelief.alpha = ' +
        'if(isnull($orthophoto.alpha), 0, 255)'
    ])
    gscript.mapcalc(calc, shadedRelief=shadedReliefName,
                    orthophoto=orthophotoName, overwrite=overwrite)

    # Group the four bands and export them as a single tiled GeoTIFF.
    gscript.run_command('i.group', group=shadedReliefName+'.group',
                        input=shadedReliefName+'.red,' +
                        shadedReliefName+'.green,' +
                        shadedReliefName+'.blue,' +
                        shadedReliefName+'.alpha')
    gscript.run_command('r.out.gdal', flags='cm',
                        input=shadedReliefName+'.group',
                        output=projectHome+'/odm_orthophoto/odm_relief.tif',
                        format='GTiff', type='Byte',
                        createopt='TILED=yes,COMPRESS=DEFLATE,PREDICTOR=2,' +
                        'BLOCKXSIZE=512,BLOCKYSIZE=512',
                        nodata=0, overwrite=overwrite)
# Entry point: python odm_grass.py <project-path>
if __name__ == '__main__':
    main()

View File

@ -0,0 +1,29 @@
# Merge Preview
Quickly projects drone images on a map by using georeferencing, camera angles and a global DTM. The images are then merged using ODM's split-merge algorithms.
Quality is obviously not good, works only for nadir-only images and requires the images to have gimbal/camera angle information (not all drones provide this information).
Usage:
```
# Install DDB (required for geoprojection)
curl -fsSL https://get.dronedb.app -o get-ddb.sh
sh get-ddb.sh
# Run
python3 mergepreview.py -i images/*.JPG --size 25%
```
## Example
![screen](https://user-images.githubusercontent.com/1951843/134249725-e178489a-e271-4244-abed-e624cd510b88.png)
[Sheffield Park](https://community.opendronemap.org/t/sheffield-park-1/58) images processed with this script.
## Disclaimer
This script is highly experimental. We welcome contributions to improve it.

View File

@ -0,0 +1,126 @@
import argparse
import sys
sys.path.append("../../")
import os
from opendm import orthophoto
from opendm.cutline import compute_cutline
import glob
from opendm.system import run
from opendm import log
import shutil
parser = argparse.ArgumentParser(description='Quick Merge Preview')
parser.add_argument('input',
                    metavar='<paths>',
                    nargs='+',
                    help='Path to input images or image folder')
parser.add_argument('--size', '-s',
                    metavar='<percentage>',
                    type=str,
                    help='Size in percentage terms',
                    default='25%')
parser.add_argument('--force', '-f',
                    action='store_true',
                    default=False,
                    help="Force remove existing directories")
args = parser.parse_args()

try:
    log.ODM_INFO("Checking for DDB...")
    run("ddb --version")
except Exception:
    # Bug fix: the original used a bare `except:` and then fell through even
    # though every later step shells out to ddb. Stop early instead,
    # consistent with the other ODM_ERROR + exit(1) paths below.
    log.ODM_ERROR("ddb is not installed. Install it first: https://docs.dronedb.app")
    exit(1)

# Gather inputs: either a single folder of images, or an explicit file list.
if len(args.input) == 1 and os.path.isdir(args.input[0]):
    input_images = []
    for ext in ["JPG", "JPEG", "TIF", "tiff", "tif", "TIFF"]:
        input_images += glob.glob(os.path.join(args.input[0], "*.%s" % ext))
else:
    input_images = args.input

log.ODM_INFO("Processing %s images" % len(input_images))

if len(input_images) == 0:
    log.ODM_ERROR("No images")
    exit(1)

# Work in a "tmp" folder next to the first input image.
cwd_path = os.path.dirname(input_images[0])
tmp_path = os.path.join(cwd_path, "tmp")

if os.path.isdir(tmp_path):
    if args.force:
        log.ODM_INFO("Removing previous directory %s" % tmp_path)
        shutil.rmtree(tmp_path)
    else:
        log.ODM_ERROR("%s exists. Pass --force to override." % tmp_path)
        exit(1)

os.makedirs(tmp_path)

# Geoproject each image and compute its footprint bounds.
for f in input_images:
    name, _ = os.path.splitext(os.path.basename(f))
    geojson = os.path.join(tmp_path, "%s.geojson" % name)
    gpkg = os.path.join(tmp_path, "%s.gpkg" % name)

    run("ddb geoproj \"%s\" \"%s\" -s \"%s\"" % (tmp_path, f, args.size))

    # Bounds (GPKG)
    run("ddb info --format geojson --geometry polygon \"%s\" > \"%s\"" % (f, geojson))
    run("ogr2ogr \"%s\" \"%s\"" % (gpkg, geojson))

log.ODM_INFO("Computing cutlines")

projected_images = glob.glob(os.path.join(tmp_path, "*.tif"))
all_orthos_and_ortho_cuts = []

# For each projected image, compute a cutline, a masked ("cut") raster and a
# feathered raster; these pairs feed ODM's split-merge blending.
for f in projected_images:
    name, _ = os.path.splitext(os.path.basename(f))
    cutline_file = os.path.join(tmp_path, "%s_cutline.gpkg" % name)
    bounds_file_path = os.path.join(tmp_path, "%s.gpkg" % name)

    compute_cutline(f,
                    bounds_file_path,
                    cutline_file,
                    4,
                    scale=1)

    cut_raster = os.path.join(tmp_path, "%s_cut.tif" % name)
    orthophoto.compute_mask_raster(f, cutline_file,
                                   cut_raster,
                                   blend_distance=20, only_max_coords_feature=True)

    feathered_raster = os.path.join(tmp_path, "%s_feathered.tif" % name)
    orthophoto.feather_raster(f, feathered_raster,
                              blend_distance=20)

    all_orthos_and_ortho_cuts.append([feathered_raster, cut_raster])

log.ODM_INFO("Merging...")

if len(all_orthos_and_ortho_cuts) > 1:
    # TODO: histogram matching via rasterio
    # currently parts have different color tones
    output_file = os.path.join(cwd_path, 'mergepreview.tif')

    if os.path.isfile(output_file):
        os.remove(output_file)

    orthophoto.merge(all_orthos_and_ortho_cuts, output_file, {
        'TILED': 'YES',
        'COMPRESS': 'LZW',
        'PREDICTOR': '2',
        'BIGTIFF': 'IF_SAFER',
        'BLOCKXSIZE': 512,
        'BLOCKYSIZE': 512
    })

    log.ODM_INFO("Wrote %s" % output_file)
    shutil.rmtree(tmp_path)
else:
    log.ODM_ERROR("Error: no orthos found to merge")
    exit(1)

View File

@ -0,0 +1,31 @@
# NDVI
This script produces a NDVI raster from a CIR orthophoto (odm_orthophoto.tif in your project)
## Requirements
* python-gdal package from apt
* numpy python package (included in ODM build)
## Usage
```
ndvi.py [-h] [--overwrite] <orthophoto.tif> N N <outfile.tif>
positional arguments:
<orthophoto.tif> The CIR orthophoto. Must be a GeoTiff.
N NIR band number
N Vis band number
<outfile.tif> The output file. Also must be in GeoTiff format
optional arguments:
-h, --help show this help message and exit
--overwrite, -o Will overwrite output file if it exists.
```
**Argument order matters! NIR first, then VIS**
## Examples:
Use the [Seneca](https://github.com/OpenDroneMap/odm_data_seneca) dataset for a good working CIR. The band order for that set is NIR-G-B, so you will want to use bands 1 and 2 for this script. After running ODM, the command goes as follows:
`python ndvi.py /path/to/odm_orthophoto.tif 1 2 /path/to/ndvi.tif`
The output in QGIS (with a spectral pseudocolor): ![](http://i.imgur.com/TdLECII.png)

View File

@ -0,0 +1,112 @@
#!/usr/bin/env python3
# A script to calculate agricultural indices
# NDVI - Normalized Difference Vegetation Index - (NIRRED)/(NIR + RED)
# NDRE - Normalized Difference Red Edge - (NIRRE)/(NIR + RE)
# GNDVI - Green NDVI - (NIRGREEN)/(NIR + GREEN)
# https://support.micasense.com/hc/en-us/articles/226531127-Creating-agricultural-indices-NDVI-NDRE-in-QGIS-
# requires python-gdal
import numpy
import argparse
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
raise ImportError("You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
def parse_args(argv=None):
    """Parse command-line arguments.

    :param argv: optional list of argument strings (defaults to sys.argv[1:]);
                 exposed so the parser can be driven programmatically/tested.
    """
    # Typo fix in the user-visible parser title: "Createa" -> "Create".
    argument_parser = argparse.ArgumentParser('Create from a multispectral orthophoto '
                                              'a GeoTiff with NDVI, NDRE and GNDVI agricultural indices')
    argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
                                 type=argparse.FileType('r'),
                                 help="The CIR orthophoto. Must be a GeoTiff.")
    # Band numbers are 1-based (GDAL convention).
    argument_parser.add_argument("-red", type=int,
                                 help="Red band number")
    argument_parser.add_argument("-green", type=int,
                                 help="Green band number")
    argument_parser.add_argument("-blue", type=int,
                                 help="Blue band number")
    argument_parser.add_argument("-re", type=int,
                                 help="RedEdge band number")
    argument_parser.add_argument("-nir", type=int,
                                 help="NIR band number")
    argument_parser.add_argument("out", metavar="<outfile.tif>",
                                 type=argparse.FileType('w'),
                                 help="The output file.")
    argument_parser.add_argument("--overwrite", "-o",
                                 action='store_true',
                                 default=False,
                                 help="Will overwrite output file if it exists. ")
    return argument_parser.parse_args(argv)
if __name__ == "__main__":
    # Suppress/hide warning when dividing by zero
    numpy.seterr(divide='ignore', invalid='ignore')

    rootdir = os.path.dirname(os.path.abspath(__file__))

    # Parse args
    args = parse_args()

    # Refuse to clobber an existing output unless -o/--overwrite was given.
    if not args.overwrite and os.path.isfile(os.path.join(rootdir, args.out.name)):
        print("File exists, rename or use -o to overwrite.")
        exit()

    # import raster as a (bands, rows, cols) array
    print("Reading file")
    raster = gdal.Open(args.orthophoto.name)
    orthophoto = raster.ReadAsArray()

    # parse out bands (band numbers are 1-based on the command line)
    print("Reading rasters")
    red_matrix = orthophoto[args.red - 1].astype(float)
    green_matrix = orthophoto[args.green - 1].astype(float)
    blue_matrix = orthophoto[args.blue - 1].astype(float)
    re_matrix = orthophoto[args.re - 1].astype(float)
    nir_matrix = orthophoto[args.nir - 1].astype(float)

    outfile = args.out

    # NDVI = (NIR - RED) / (NIR + RED)
    print("Computing NDVI")
    ndvi = (nir_matrix - red_matrix) / (nir_matrix + red_matrix)

    # NDRE = (NIR - RE) / (NIR + RE)
    print("Computing NDRE")
    ndre = (nir_matrix - re_matrix) / (nir_matrix + re_matrix)

    # GNDVI = (NIR - GREEN) / (NIR + GREEN)
    print("Computing GNDVI")
    gndvi = (nir_matrix - green_matrix) / (nir_matrix + green_matrix)

    # Bug fix: removed leftover interactive debugging line
    # `__import__("IPython").embed()` which dropped into a shell here
    # (and crashed when IPython was not installed).

    print("Saving Files")
    # export raster: one single-band Float32 GeoTIFF per index, named
    # "<index>_<outfile>".
    # NOTE(review): prefixing the index name onto the full output path breaks
    # when <outfile.tif> contains directories -- confirm intended usage.
    for name, matrix in zip(['ndvi', 'ndre', 'gndvi'], [ndvi, ndre, gndvi]):
        print(name)
        out_ds = gdal.GetDriverByName('GTiff')\
            .Create(name + '_' + outfile.name, int(matrix.shape[1]), int(matrix.shape[0]), 1, gdal.GDT_Float32)
        outband = out_ds.GetRasterBand(1)
        outband.SetDescription(name.capitalize())
        outband.WriteArray(matrix)
        # Copy georeferencing from the input raster.
        outcrs = osr.SpatialReference()
        outcrs.ImportFromWkt(raster.GetProjectionRef())
        out_ds.SetProjection(outcrs.ExportToWkt())
        out_ds.SetGeoTransform(raster.GetGeoTransform())
        outband.FlushCache()

View File

@ -0,0 +1,82 @@
# A script to calculate the NDVI from a color-infrared orthophoto.
# requires python-gdal
import numpy
import argparse
import os.path
try:
from osgeo import gdal
from osgeo import osr
except ImportError:
raise ImportError("You need to install python-gdal. run `apt-get install python-gdal`")
exit()
def parse_args(argv=None):
    """Parse command-line arguments for the NDVI tool.

    :param argv: optional list of argument strings (defaults to sys.argv[1:]);
                 exposed so the parser can be driven programmatically/tested.
    """
    p = argparse.ArgumentParser("A script that calculates the NDVI of a CIR orthophoto")

    p.add_argument("orthophoto", metavar="<orthophoto.tif>",
                   type=argparse.FileType('r'),
                   help="The CIR orthophoto. Must be a GeoTiff.")
    # Band numbers are 1-based (GDAL convention); NIR first, then VIS.
    p.add_argument("nir", metavar="N", type=int,
                   help="NIR band number")
    p.add_argument("vis", metavar="N", type=int,
                   help="Vis band number")
    p.add_argument("out", metavar="<outfile.tif>",
                   type=argparse.FileType('w'),
                   help="The output file. Also must be in GeoTiff format")
    p.add_argument("--overwrite", "-o",
                   action='store_true',
                   default=False,
                   help="Will overwrite output file if it exists. ")
    return p.parse_args(argv)
def calc_ndvi(nir, vis):
    """
    Calculates the NDVI of an orthophoto using nir and vis bands.

    NDVI = (NIR - VIS) / (NIR + VIS); cells where NIR + VIS == 0 (which would
    divide by zero) are mapped to -1.0.

    :param nir: An array containing the nir band
    :param vis: An array containing the vis band
    :return: An array that will be exported as a tif
    """
    # Bug fix: the original read the module-level globals `nirb`/`visb`
    # instead of the `nir`/`vis` parameters, so the function only worked when
    # called from this script's __main__ block. Also removed an unused
    # `numpy.empty` allocation.
    mask = numpy.not_equal(nir + vis, 0.0)
    return numpy.choose(mask, (-1.0, numpy.true_divide(numpy.subtract(nir, vis),
                                                       numpy.add(nir, vis))))
if __name__ == "__main__":
    # Folder this script lives in; used for the output-exists check below.
    rootdir = os.path.dirname(os.path.abspath(__file__))

    args = parse_args()

    # Refuse to clobber an existing output unless -o/--overwrite was given.
    if not args.overwrite and os.path.isfile(os.path.join(rootdir, args.out.name)):
        print("File exists, rename or use -o to overwrite.")
        exit()

    # Load the whole orthophoto as a (bands, rows, cols) array.
    raster = gdal.Open(args.orthophoto.name)
    orthophoto = raster.ReadAsArray()

    # Band numbers are 1-based on the command line.
    # NOTE: keep the names `nirb`/`visb` -- calc_ndvi() reads them.
    nirb = orthophoto[args.nir - 1].astype(float)
    visb = orthophoto[args.vis - 1].astype(float)
    outfile = args.out

    # Do ndvi calc
    ndvi = calc_ndvi(nirb, visb)

    # Write a single-band Float32 GeoTIFF, copying georeferencing from input.
    rows, cols = ndvi.shape
    out_ds = gdal.GetDriverByName('GTiff').Create(
        outfile.name, int(cols), int(rows), 1, gdal.GDT_Float32)
    band = out_ds.GetRasterBand(1)
    band.WriteArray(ndvi)
    srs = osr.SpatialReference()
    srs.ImportFromWkt(raster.GetProjectionRef())
    out_ds.SetProjection(srs.ExportToWkt())
    out_ds.SetGeoTransform(raster.GetGeoTransform())
    band.FlushCache()

View File

@ -0,0 +1,73 @@
#!/usr/bin/env python3
# A script to rename.
# requires python-gdal
import argparse
import sys
try:
from osgeo import gdal
except ImportError:
raise ImportError("You need to install python-gdal : \
run `sudo apt-get install libgdal-dev` \
# Check Gdal version with \
gdal-config --version \
#install corresponding gdal version with pip : \
pip3 install GDAL==2.4.0")
def parse_args(argv=None):
    """Parse arguments.

    :param argv: optional list of argument strings (defaults to sys.argv[1:]);
                 exposed so the parser can be driven programmatically/tested.
    """
    argument_parser = argparse.ArgumentParser(
        "A script that rename inplace Sentera AGX710 Geotiff orthophoto. ")
    argument_parser.add_argument("orthophoto", metavar="<orthophoto.tif>",
                                 type=argparse.FileType('r'),
                                 help="The input orthophoto. Must be a GeoTiff.")
    return argument_parser.parse_args(argv)
def rename_sentera_agx710_layers(name):
    """ Only rename Geotif built from Sentera AGX710 images with ODM """
    # NOTE(review): this reads the module-level `raster` dataset opened in
    # __main__ rather than taking it as a parameter -- confirm before reuse.
    if raster.RasterCount != 7:
        # NOTE(review): ImportError is an odd exception type for a
        # wrong-layer-count input; ValueError would describe it better.
        raise ImportError(F'File {name} does not have 7 layers as a regular\
 Geotif built from Sentera AGX710 images with ODM')

    # Detect the un-renamed Sentera layout by the stock band descriptions.
    if 'RedGreenBlue' in raster.GetRasterBand(1).GetDescription() and \
       'RedEdgeGarbageNIR' in raster.GetRasterBand(2).GetDescription():

        # Show the current and target band names, then ask for confirmation
        # since the file is modified in place.
        print("Sentera AGX710 Geotiff file has been detected.\
 Layers are name are :")
        print("RedGreenBlue for Band 1\nRedEdgeGarbageNIR for Band 2\
\nNone for Band 3\nNone for Band 4\nNone for Band 5\nNone for Band 6")
        print("\nAfter renaming bands will be :")
        print("Red for Band 1\nGreen for Band 2\nBlue for Band 3\n\
RedEdge for Band 4\nGarbage for Band 5\nNIR for Band 6")

        answer = input(
            "Are you sure you want to rename the layers of the input file ? [yes/no] ")
        if answer == 'yes':
            # Write the new band descriptions (saved when the dataset is
            # de-referenced by the caller).
            raster.GetRasterBand(1).SetDescription('Red')
            raster.GetRasterBand(2).SetDescription('Green')
            raster.GetRasterBand(3).SetDescription('Blue')
            raster.GetRasterBand(4).SetDescription('RedEdge')
            raster.GetRasterBand(5).SetDescription('Garbage')
            raster.GetRasterBand(6).SetDescription('NIR')
            # raster.GetRasterBand(7).SetDescription('Alpha')
        else:
            print("No renaming")
    else:
        # Already renamed (or not a Sentera AGX710 product): nothing to do.
        print(F'No need for band renaming in {name}')
        sys.exit()
if __name__ == "__main__":
    # Parse args
    args = parse_args()

    # import raster (opened in update mode so band descriptions can be written)
    raster = gdal.Open(args.orthophoto.name, gdal.GA_Update)

    # Rename layers
    rename_sentera_agx710_layers(args.orthophoto.name)

    # de-reference the datasets, which triggers gdal to save
    raster = None

View File

@ -0,0 +1,69 @@
# Orthorectification Tool
![image](https://user-images.githubusercontent.com/1951843/111536715-fc91c380-8740-11eb-844c-5b7960186391.png)
This tool is capable of orthorectifying individual images (or all images) from an existing ODM reconstruction.
![image](https://user-images.githubusercontent.com/1951843/111529183-3ad6b500-8738-11eb-9960-b1aa676f863b.png)
## Usage
After running a reconstruction using ODM:
```
docker run -ti --rm -v /home/youruser/datasets:/datasets opendronemap/odm --project-path /datasets project
```
You can run the orthorectification module by running:
```
docker run -ti --rm -v /home/youruser/datasets:/datasets --entrypoint /code/contrib/orthorectify/run.sh opendronemap/odm /datasets/project
```
This will start the orthorectification process for all images in the dataset. See additional flags you can pass at the end of the command above:
```
usage: orthorectify.py [-h] [--dem DEM] [--no-alpha NO_ALPHA]
[--interpolation {nearest,bilinear}]
[--outdir OUTDIR] [--image-list IMAGE_LIST]
[--images IMAGES] [--threads THREADS]
[--skip-visibility-test SKIP_VISIBILITY_TEST]
dataset
Orthorectification Tool
positional arguments:
dataset Path to ODM dataset
optional arguments:
-h, --help show this help message and exit
--dem DEM Absolute path to DEM to use to
orthorectify images. Default:
odm_dem/dsm.tif
--no-alpha NO_ALPHA Don't output an alpha channel
--interpolation {nearest,bilinear}
Type of interpolation to use to sample
pixel values.Default: bilinear
--outdir OUTDIR Output directory where to store results.
Default: orthorectified
--image-list IMAGE_LIST
Path to file that contains the list of
image filenames to orthorectify. By
default all images in a dataset are
processed. Default: img_list.txt
--images IMAGES Comma-separated list of filenames to
rectify. Use as an alternative to --image-
list. Default: process all images.
--skip-visibility-test SKIP_VISIBILITY_TEST
Skip visibility testing (faster but leaves
artifacts due to relief displacement)
```
## Roadmap
Help us improve this module! We could add:
- [ ] GPU support for faster processing
- [ ] Merging of multiple orthorectified images (blending, filtering, seam leveling)
- [ ] Faster visibility test
- [ ] Different methods for orthorectification (direct)

View File

@ -0,0 +1,392 @@
#!/usr/bin/env python3
# Author: Piero Toffanin
# License: AGPLv3
import os
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
from math import sqrt
import rasterio
import numpy as np
import numpy.ma as ma
import multiprocessing
import argparse
import functools
from skimage.draw import line
from opensfm import dataset
# Default locations, relative to the dataset path.
default_dem_path = "odm_dem/dsm.tif"
default_outdir = "orthorectified"
default_image_list = "img_list.txt"

parser = argparse.ArgumentParser(description='Orthorectification Tool')
parser.add_argument('dataset',
                    type=str,
                    help='Path to ODM dataset')
parser.add_argument('--dem',
                    type=str,
                    default=default_dem_path,
                    help='Absolute path to DEM to use to orthorectify images. Default: %(default)s')
# NOTE(review): type=bool does not behave like a flag -- argparse treats any
# non-empty string value (including "False") as True. action='store_true'
# would be the usual fix, but it changes the CLI arity documented in README.
parser.add_argument('--no-alpha',
                    type=bool,
                    help="Don't output an alpha channel")
parser.add_argument('--interpolation',
                    type=str,
                    choices=('nearest', 'bilinear'),
                    default='bilinear',
                    help="Type of interpolation to use to sample pixel values.Default: %(default)s")
parser.add_argument('--outdir',
                    type=str,
                    default=default_outdir,
                    help="Output directory where to store results. Default: %(default)s")
parser.add_argument('--image-list',
                    type=str,
                    default=default_image_list,
                    help="Path to file that contains the list of image filenames to orthorectify. By default all images in a dataset are processed. Default: %(default)s")
parser.add_argument('--images',
                    type=str,
                    default="",
                    help="Comma-separated list of filenames to rectify. Use as an alternative to --image-list. Default: process all images.")
parser.add_argument('--threads',
                    type=int,
                    default=multiprocessing.cpu_count(),
                    help="Number of CPU processes to use. Default: %(default)s")
# NOTE(review): same type=bool pitfall as --no-alpha above.
parser.add_argument('--skip-visibility-test',
                    type=bool,
                    help="Skip visibility testing (faster but leaves artifacts due to relief displacement)")
args = parser.parse_args()

dataset_path = args.dataset
# Resolve the default paths relative to the dataset; user-supplied custom
# paths are used as-is.
dem_path = os.path.join(dataset_path, default_dem_path) if args.dem == default_dem_path else args.dem
interpolation = args.interpolation
with_alpha = not args.no_alpha
image_list = os.path.join(dataset_path, default_image_list) if args.image_list == default_image_list else args.image_list

cwd_path = os.path.join(dataset_path, default_outdir) if args.outdir == default_outdir else args.outdir
if not os.path.exists(cwd_path):
    os.makedirs(cwd_path)

# Which shots to process: --images wins, then --image-list; empty means all.
target_images = [] # all

if args.images:
    target_images = list(map(str.strip, args.images.split(",")))
    print("Processing %s images" % len(target_images))
elif args.image_list:
    with open(image_list) as f:
        target_images = list(filter(lambda filename: filename != '', map(str.strip, f.read().split("\n"))))
    print("Processing %s images" % len(target_images))

if not os.path.exists(dem_path):
    print("Whoops! %s does not exist. Provide a path to a valid DEM" % dem_path)
    exit(1)
def bilinear_interpolate(im, x, y):
x = np.asarray(x)
y = np.asarray(y)
x0 = np.floor(x).astype(int)
x1 = x0 + 1
y0 = np.floor(y).astype(int)
y1 = y0 + 1
x0 = np.clip(x0, 0, im.shape[1]-1)
x1 = np.clip(x1, 0, im.shape[1]-1)
y0 = np.clip(y0, 0, im.shape[0]-1)
y1 = np.clip(y1, 0, im.shape[0]-1)
Ia = im[ y0, x0 ]
Ib = im[ y1, x0 ]
Ic = im[ y0, x1 ]
Id = im[ y1, x1 ]
wa = (x1-x) * (y1-y)
wb = (x1-x) * (y-y0)
wc = (x-x0) * (y1-y)
wd = (x-x0) * (y-y0)
return wa*Ia + wb*Ib + wc*Ic + wd*Id
# Read DEM
print("Reading DEM: %s" % dem_path)
with rasterio.open(dem_path) as dem_raster:
    dem = dem_raster.read()[0]
    dem_has_nodata = dem_raster.profile.get('nodata') is not None

    # DEM min/max, masking out nodata cells when a nodata value is declared.
    if dem_has_nodata:
        m = ma.array(dem, mask=dem==dem_raster.nodata)
        dem_min_value = m.min()
        dem_max_value = m.max()
    else:
        dem_min_value = dem.min()
        dem_max_value = dem.max()

    print("DEM Minimum: %s" % dem_min_value)
    print("DEM Maximum: %s" % dem_max_value)

    h, w = dem.shape

    crs = dem_raster.profile.get('crs')
    dem_offset_x, dem_offset_y = (0, 0)

    if crs:
        print("DEM has a CRS: %s" % str(crs))

        # Read coords.txt
        coords_file = os.path.join(dataset_path, "odm_georeferencing", "coords.txt")
        if not os.path.exists(coords_file):
            print("Whoops! Cannot find %s (we need that!)" % coords_file)
            exit(1)

        with open(coords_file) as f:
            l = f.readline() # discard
            # second line is a northing/easting offset
            l = f.readline().rstrip()
            dem_offset_x, dem_offset_y = map(float, l.split(" "))

        print("DEM offset: (%s, %s)" % (dem_offset_x, dem_offset_y))

    print("DEM dimensions: %sx%s pixels" % (w, h))

    # Read reconstruction
    udata = dataset.UndistortedDataSet(dataset.DataSet(os.path.join(dataset_path, "opensfm")), undistorted_data_path=os.path.join(dataset_path, "opensfm", "undistorted"))
    reconstructions = udata.load_undistorted_reconstruction()
    if len(reconstructions) == 0:
        raise Exception("No reconstructions available")

    max_workers = args.threads
    print("Using %s threads" % max_workers)

    reconstruction = reconstructions[0]
    for shot in reconstruction.shots.values():
        if len(target_images) == 0 or shot.id in target_images:

            print("Processing %s..." % shot.id)
            shot_image = udata.load_undistorted_image(shot.id)

            # Camera pose: rotation matrix and world-space origin.
            r = shot.pose.get_rotation_matrix()
            Xs, Ys, Zs = shot.pose.get_origin()
            # DEM grid cell containing the camera position.
            cam_grid_y, cam_grid_x = dem_raster.index(Xs + dem_offset_x, Ys + dem_offset_y)

            # Unpack the rotation matrix entries for the colinearity equations.
            a1 = r[0][0]
            b1 = r[0][1]
            c1 = r[0][2]
            a2 = r[1][0]
            b2 = r[1][1]
            c2 = r[1][2]
            a3 = r[2][0]
            b3 = r[2][1]
            c3 = r[2][2]

            if not args.skip_visibility_test:
                # Distance (in grid cells) of every DEM cell from the camera
                # cell; used below to march rays for the visibility test.
                distance_map = np.full((h, w), np.nan)
                for j in range(0, h):
                    for i in range(0, w):
                        distance_map[j][i] = sqrt((cam_grid_x - i) ** 2 + (cam_grid_y - j) ** 2)
                # Avoid division by zero at the camera's own cell.
                distance_map[distance_map==0] = 1e-7

            print("Camera pose: (%f, %f, %f)" % (Xs, Ys, Zs))

            img_h, img_w, num_bands = shot_image.shape
            half_img_w = (img_w - 1) / 2.0
            half_img_h = (img_h - 1) / 2.0
            print("Image dimensions: %sx%s pixels" % (img_w, img_h))
            # Focal length in pixels.
            f = shot.camera.focal * max(img_h, img_w)
            has_nodata = dem_raster.profile.get('nodata') is not None

            def process_pixels(step):
                """Orthorectify every DEM row j with j % max_workers == step.

                Returns (imgout, (minx, miny, maxx, maxy)): imgout is a
                (num_bands, dem_bbox_h, dem_bbox_w) array, NaN where no sample
                was taken, and the tuple is the bbox of written pixels.
                """
                imgout = np.full((num_bands, dem_bbox_h, dem_bbox_w), np.nan)
                minx = dem_bbox_w
                miny = dem_bbox_h
                maxx = 0
                maxy = 0

                for j in range(dem_bbox_miny, dem_bbox_maxy + 1):
                    if j % max_workers == step:
                        im_j = j - dem_bbox_miny

                        for i in range(dem_bbox_minx, dem_bbox_maxx + 1):
                            im_i = i - dem_bbox_minx

                            # World coordinates
                            Za = dem[j][i]

                            # Skip nodata
                            if has_nodata and Za == dem_raster.nodata:
                                continue

                            Xa, Ya = dem_raster.xy(j, i)

                            # Remove offset (our cameras don't have the geographic offset)
                            Xa -= dem_offset_x
                            Ya -= dem_offset_y

                            # Colinearity function http://web.pdx.edu/~jduh/courses/geog493f14/Week03.pdf
                            dx = (Xa - Xs)
                            dy = (Ya - Ys)
                            dz = (Za - Zs)

                            den = a3 * dx + b3 * dy + c3 * dz
                            x = half_img_w - (f * (a1 * dx + b1 * dy + c1 * dz) / den)
                            y = half_img_h - (f * (a2 * dx + b2 * dy + c2 * dz) / den)

                            # Only sample when the projection lands inside the image.
                            if x >= 0 and y >= 0 and x <= img_w - 1 and y <= img_h - 1:
                                # Visibility test
                                if not args.skip_visibility_test:
                                    # March the DEM cells between this cell and
                                    # the camera; if terrain rises above the
                                    # camera ray, the cell is occluded.
                                    check_dem_points = np.column_stack(line(i, j, cam_grid_x, cam_grid_y))
                                    check_dem_points = check_dem_points[np.all(np.logical_and(np.array([0, 0]) <= check_dem_points, check_dem_points < [w, h]), axis=1)]

                                    visible = True
                                    for p in check_dem_points:
                                        ray_z = Zs + (distance_map[p[1]][p[0]] / distance_map[j][i]) * dz
                                        # Once the ray is above the DEM maximum
                                        # nothing can occlude it anymore.
                                        if ray_z > dem_max_value:
                                            break

                                        if dem[p[1]][p[0]] > ray_z:
                                            visible = False
                                            break
                                    if not visible:
                                        continue

                                if interpolation == 'bilinear':
                                    xi = img_w - 1 - x
                                    yi = img_h - 1 - y
                                    values = bilinear_interpolate(shot_image, xi, yi)
                                else:
                                    # nearest
                                    xi = img_w - 1 - int(round(x))
                                    yi = img_h - 1 - int(round(y))
                                    values = shot_image[yi][xi]

                                # We don't consider all zero values (pure black)
                                # to be valid sample values. This will sometimes miss
                                # valid sample values.
                                if not np.all(values == 0):
                                    minx = min(minx, im_i)
                                    miny = min(miny, im_j)
                                    maxx = max(maxx, im_i)
                                    maxy = max(maxy, im_j)

                                    for b in range(num_bands):
                                        imgout[b][im_j][im_i] = values[b]

                                # for b in range(num_bands):
                                #     minx = min(minx, im_i)
                                #     miny = min(miny, im_j)
                                #     maxx = max(maxx, im_i)
                                #     maxy = max(maxy, im_j)
                                #     imgout[b][im_j][im_i] = 255

                return (imgout, (minx, miny, maxx, maxy))

            # Compute bounding box of image coverage
            # assuming a flat plane at Z = min Z
            # (Otherwise we have to scan the entire DEM)
            # The Xa,Ya equations are just derived from the colinearity equations
            # solving for Xa and Ya instead of x,y
            def dem_coordinates(cpx, cpy):
                """
                :param cpx principal point X (image coordinates)
                :param cpy principal point Y (image coordinates)
                """
                Za = dem_min_value
                m = (a3*b1*cpy - a1*b3*cpy - (a3*b2 - a2*b3)*cpx - (a2*b1 - a1*b2)*f)
                Xa = dem_offset_x + (m*Xs + (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Za - (b3*c1*cpy - b1*c3*cpy - (b3*c2 - b2*c3)*cpx - (b2*c1 - b1*c2)*f)*Zs)/m
                Ya = dem_offset_y + (m*Ys - (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Za + (a3*c1*cpy - a1*c3*cpy - (a3*c2 - a2*c3)*cpx - (a2*c1 - a1*c2)*f)*Zs)/m

                y, x = dem_raster.index(Xa, Ya)
                return (x, y)

            # Project the four image corners to DEM grid coordinates.
            dem_ul = dem_coordinates(-(img_w - 1) / 2.0, -(img_h - 1) / 2.0)
            dem_ur = dem_coordinates((img_w - 1) / 2.0, -(img_h - 1) / 2.0)
            dem_lr = dem_coordinates((img_w - 1) / 2.0, (img_h - 1) / 2.0)
            dem_ll = dem_coordinates(-(img_w - 1) / 2.0, (img_h - 1) / 2.0)
            dem_bbox = [dem_ul, dem_ur, dem_lr, dem_ll]
            dem_bbox_x = np.array(list(map(lambda xy: xy[0], dem_bbox)))
            dem_bbox_y = np.array(list(map(lambda xy: xy[1], dem_bbox)))

            # Clamp the bounding box to the DEM extents.
            dem_bbox_minx = min(w - 1, max(0, dem_bbox_x.min()))
            dem_bbox_miny = min(h - 1, max(0, dem_bbox_y.min()))
            dem_bbox_maxx = min(w - 1, max(0, dem_bbox_x.max()))
            dem_bbox_maxy = min(h - 1, max(0, dem_bbox_y.max()))

            dem_bbox_w = 1 + dem_bbox_maxx - dem_bbox_minx
            dem_bbox_h = 1 + dem_bbox_maxy - dem_bbox_miny

            print("Iterating over DEM box: [(%s, %s), (%s, %s)] (%sx%s pixels)" % (dem_bbox_minx, dem_bbox_miny, dem_bbox_maxx, dem_bbox_maxy, dem_bbox_w, dem_bbox_h))

            # Each worker handles an interleaved subset of DEM rows.
            if max_workers > 1:
                with multiprocessing.Pool(max_workers) as p:
                    results = p.map(process_pixels, range(max_workers))
            else:
                results = [process_pixels(0)]

            # Drop workers that wrote no pixels at all (empty bounds).
            results = list(filter(lambda r: r[1][0] <= r[1][2] and r[1][1] <= r[1][3], results))

            # Merge image
            # NOTE(review): rows are fetched via results[j % max_workers]; if
            # the filter above removed any empty worker result, this index no
            # longer matches the worker that produced row j -- verify.
            imgout, _ = results[0]

            for j in range(dem_bbox_miny, dem_bbox_maxy + 1):
                im_j = j - dem_bbox_miny
                resimg, _ = results[j % max_workers]

                for b in range(num_bands):
                    imgout[b][im_j] = resimg[b][im_j]

            # Merge bounds
            minx = dem_bbox_w
            miny = dem_bbox_h
            maxx = 0
            maxy = 0

            for _, bounds in results:
                minx = min(bounds[0], minx)
                miny = min(bounds[1], miny)
                maxx = max(bounds[2], maxx)
                maxy = max(bounds[3], maxy)

            print("Output bounds: (%s, %s), (%s, %s) pixels" % (minx, miny, maxx, maxy))

            if minx <= maxx and miny <= maxy:
                # Crop to the written area.
                imgout = imgout[:,miny:maxy+1,minx:maxx+1]

                if with_alpha:
                    alpha = np.zeros((imgout.shape[1], imgout.shape[2]), dtype=np.uint8)

                    # Set all not-NaN indices to 255
                    alpha[~np.isnan(imgout[0])] = 255

                # Cast
                imgout = imgout.astype(shot_image.dtype)

                # Geotransform for the cropped window, anchored at its
                # upper-left corner in DEM coordinates.
                dem_transform = dem_raster.profile['transform']
                offset_x, offset_y = dem_raster.xy(dem_bbox_miny + miny, dem_bbox_minx + minx, offset='ul')

                profile = {
                    'driver': 'GTiff',
                    'width': imgout.shape[2],
                    'height': imgout.shape[1],
                    'count': num_bands + 1 if with_alpha else num_bands,
                    'dtype': imgout.dtype.name,
                    'transform': rasterio.transform.Affine(dem_transform[0], dem_transform[1], offset_x,
                                                           dem_transform[3], dem_transform[4], offset_y),
                    'nodata': None,
                    'crs': crs
                }

                outfile = os.path.join(cwd_path, shot.id)
                if not outfile.endswith(".tif"):
                    outfile = outfile + ".tif"

                with rasterio.open(outfile, 'w', BIGTIFF="IF_SAFER", **profile) as wout:
                    for b in range(num_bands):
                        wout.write(imgout[b], b + 1)
                    if with_alpha:
                        wout.write(alpha, num_bands + 1)

                print("Wrote %s" % outfile)
            else:
                print("Cannot orthorectify image (is the image inside the DEM bounds?)")
View File

@ -0,0 +1,3 @@
#!/bin/bash
# Run the orthorectification tool with OpenSfM on the Python path,
# forwarding all command-line arguments.
PYTHONPATH=$PYTHONPATH:/code/SuperBuild/install/bin/opensfm python3 orthorectify.py "$@"

View File

@ -0,0 +1,44 @@
# Point Cloud To DEM
Convert point clouds (LAS, LAZ, PLY, and any other format compatible with [PDAL](https://pdal.io/stages/readers.html)) to GeoTIFF elevation models.
![image](https://user-images.githubusercontent.com/1951843/112354653-492a5100-8ca3-11eb-9f21-4dda4cae976f.png)
This tool includes methods to perform efficient and scalable gapfill interpolation and is the same method used by ODM's processing pipeline. It is offered here as a standalone module for processing individual point clouds.
## Usage
```
docker run -ti --rm -v /home/youruser/folder_with_point_cloud:/input --entrypoint /code/contrib/pc2dem/pc2dem.py opendronemap/odm /input/point_cloud.las [flags]
```
The result (`dsm.tif` or `dtm.tif`) will be stored in the same folder as the input point cloud. See additional `flags` you can pass at the end of the command above:
```
usage: pc2dem.py [-h] [--type {dsm,dtm}] [--resolution RESOLUTION]
[--gapfill-steps GAPFILL_STEPS]
point_cloud
Generate DEMs from point clouds using ODM's algorithm.
positional arguments:
point_cloud Path to point cloud file (.las, .laz,
.ply)
optional arguments:
-h, --help show this help message and exit
--type {dsm,dtm} Type of DEM. Default: dsm
--resolution RESOLUTION
Resolution in m/px. Default: 0.05
--gapfill-steps GAPFILL_STEPS
Number of steps used to fill areas with
gaps. Set to 0 to disable gap filling.
Starting with a radius equal to the output
resolution, N different DEMs are generated
with progressively bigger radius using the
inverse distance weighted (IDW) algorithm
and merged together. Remaining gaps are
then merged using nearest neighbor
interpolation. Default: 3
```

View File

@ -0,0 +1,57 @@
#!/usr/bin/env python3
# Author: Piero Toffanin
# License: AGPLv3
import os
import sys
sys.path.insert(0, os.path.join("..", "..", os.path.dirname(__file__)))
import argparse
import multiprocessing
from opendm.dem import commands
# Command-line interface: a point cloud in, a dsm.tif/dtm.tif out.
parser = argparse.ArgumentParser(description='Generate DEMs from point clouds using ODM\'s algorithm.')
parser.add_argument('point_cloud',
                    type=str,
                    help='Path to point cloud file (.las, .laz, .ply)')
parser.add_argument('--type',
                    type=str,
                    choices=("dsm", "dtm"),
                    default="dsm",
                    help="Type of DEM. Default: %(default)s")
parser.add_argument('--resolution',
                    type=float,
                    default=0.05,
                    help='Resolution in m/px. Default: %(default)s')
parser.add_argument('--gapfill-steps',
                    default=3,
                    type=int,
                    help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
                         'Starting with a radius equal to the output resolution, N different DEMs are generated with '
                         'progressively bigger radius using the inverse distance weighted (IDW) algorithm '
                         'and merged together. Remaining gaps are then merged using nearest neighbor interpolation. '
                         'Default: %(default)s')
args = parser.parse_args()

# Bail out early when the input file is missing.
if not os.path.exists(args.point_cloud):
    print("%s does not exist" % args.point_cloud)
    exit(1)

# The DEM is written next to the input point cloud.
outdir = os.path.dirname(args.point_cloud)

# Gap-filling radii: start at half the output resolution and double per step
# (2 is arbitrary, maybe there's a better value?). At least one radius is
# always produced, even when gap filling is disabled.
num_radii = max(1, args.gapfill_steps)
radius_steps = [(args.resolution / 2.0) * (2 ** step) for step in range(num_radii)]

commands.create_dem(args.point_cloud,
                    args.type,
                    output_type='idw' if args.type == 'dtm' else 'max',
                    radiuses=list(map(str, radius_steps)),
                    gapfill=args.gapfill_steps > 0,
                    outdir=outdir,
                    resolution=args.resolution,
                    decimation=1,
                    verbose=True,
                    max_workers=multiprocessing.cpu_count(),
                    keep_unfilled_copy=False)

View File

@ -0,0 +1,16 @@
# Resize
Resize a dataset (and optional GCP file).
Resizes images while keeping their EXIF data; the EXIF width and height attributes are updated accordingly. ODM GCP files are scaled as well.
Usage:
```
pip install -r requirements.txt
python3 resize.py -i images/ -o resized/ 25%
python3 resize.py -i images/1.JPG -o resized.JPG 25%
python3 resize.py -i gcp_list.txt -o resized_gcp_list.txt
```
Originally forked from https://github.com/pierotofy/exifimageresize

View File

@ -0,0 +1,2 @@
Pillow==8.0.1
piexif==1.1.2

View File

@ -0,0 +1,169 @@
import argparse
import os
import glob
import shutil
from PIL import Image
import piexif
import multiprocessing
from multiprocessing.pool import ThreadPool
import sys
sys.path.append("../../")
from opendm.gcp import GCPFile
# Command-line interface: input image/GCP/folder, output path, and the resize
# amount (either a pixel size for the largest side, or a percentage).
parser = argparse.ArgumentParser(description='Exif Image Resize')
parser.add_argument('--input', '-i',
                    metavar='<path>',
                    required=True,
                    help='Path to input image/GCP or image folder')
parser.add_argument('--output', '-o',
                    metavar='<path>',
                    required=True,
                    help='Path to output image/GCP or image folder')
parser.add_argument('--force', '-f',
                    action='store_true',
                    default=False,
                    help='Overwrite results')
parser.add_argument('amount',
                    metavar='<pixel|percentage%>',
                    type=str,
                    help='Pixel of largest side or percentage to resize images by')
args = parser.parse_args()
def die(msg):
    """Print `msg` and terminate the process with exit status 1."""
    print(msg)
    sys.exit(1)
# Mutable holder for a shared error counter, letting the worker functions
# increment it without `global` declarations.
class nonloc:
    errors = 0
def resize_image(image_path, out_path, resize_to, out_path_is_file=False):
    """
    Resize one image, carrying its EXIF data over to the resized copy with
    the EXIF pixel dimensions updated.

    :param image_path: path to the image
    :param out_path: path to the output directory or file
    :param resize_to: percentage ("perc%") or pixels
    :param out_path_is_file: when True, out_path is the destination file itself
    """
    try:
        im = Image.open(image_path)
        try:
            path, ext = os.path.splitext(image_path)
            if out_path_is_file:
                resized_image_path = out_path
            else:
                resized_image_path = os.path.join(out_path, os.path.basename(image_path))

            width, height = im.size
            max_side = max(width, height)
            if isinstance(resize_to, str) and resize_to.endswith("%"):
                ratio = float(resize_to[:-1]) / 100.0
            else:
                # Numeric amount: target size of the largest side, in pixels.
                ratio = float(resize_to) / float(max_side)

            resized_width = int(width * ratio)
            resized_height = int(height * ratio)

            im.thumbnail((resized_width, resized_height), Image.LANCZOS)

            # Pillow needs the save format; it is derived from the input
            # extension ("JPG" is not a valid format name, map it to "JPEG").
            driver = ext[1:].upper()
            if driver == 'JPG':
                driver = 'JPEG'

            if 'exif' in im.info:
                # Keep the original EXIF but update the stored pixel dimensions.
                exif_dict = piexif.load(im.info['exif'])
                exif_dict['Exif'][piexif.ExifIFD.PixelXDimension] = resized_width
                exif_dict['Exif'][piexif.ExifIFD.PixelYDimension] = resized_height
                im.save(resized_image_path, driver, exif=piexif.dump(exif_dict), quality=100)
            else:
                im.save(resized_image_path, driver, quality=100)
        finally:
            # Bug fix: the original only closed the image on success, leaking
            # the file handle when thumbnail()/save() raised.
            im.close()

        print("{} ({}x{}) --> {} ({}x{})".format(image_path, width, height, resized_image_path, resized_width, resized_height))
    except (IOError, ValueError) as e:
        print("Error: Cannot resize {}: {}.".format(image_path, str(e)))
        nonloc.errors += 1
def resize_gcp(gcp_path, out_path, resize_to, out_path_is_file=False):
    """Write a resized copy of a GCP file.

    :param gcp_path: path to the GCP file
    :param out_path: path to the output directory or file
    :param resize_to: percentage string ("NN%") or a numeric ratio
    :param out_path_is_file: when True, out_path is the destination file
        itself rather than a directory
    """
    try:
        if out_path_is_file:
            resized_gcp_path = out_path
        else:
            resized_gcp_path = os.path.join(out_path, os.path.basename(gcp_path))

        # Mirror resize_image(): the CLI converts non-percentage amounts to
        # float, so resize_to is not always a string. The original called
        # .endswith() unconditionally, raising AttributeError for floats —
        # an exception the except clause below does not catch.
        if isinstance(resize_to, str) and resize_to.endswith("%"):
            ratio = float(resize_to[:-1]) / 100.0
        else:
            ratio = resize_to

        gcp = GCPFile(gcp_path)
        if gcp.entries_count() > 0:
            gcp.make_resized_copy(resized_gcp_path, ratio)
        else:
            raise ValueError("No GCP entries")

        print("{} --> {}".format(gcp_path, resized_gcp_path))
    except (IOError, ValueError) as e:
        print("Error: Cannot resize {}: {}.".format(gcp_path, str(e)))
        nonloc.errors += 1
# Validate the amount argument. Pixel amounts are converted to float here;
# percentage strings keep their trailing "%" and are parsed later by the
# resize functions.
if not args.amount.endswith("%"):
    args.amount = float(args.amount)
    if args.amount <= 0:
        die("Invalid amount")
else:
    try:
        if float(args.amount[:-1]) <= 0:
            die("Invalid amount")
    # bare except is deliberate here: any parse failure (non-numeric
    # percentage) means the amount is invalid
    except:
        die("Invalid amount")
# Collect inputs: a directory is scanned for images (common raster
# extensions, upper- and lower-case) plus GCP .txt files; a single file is
# classified by its extension.
files = []
gcps = []

if os.path.isdir(args.input):
    for ext in ["JPG", "JPEG", "PNG", "TIFF", "TIF"]:
        files += glob.glob("{}/*.{}".format(args.input, ext))
        files += glob.glob("{}/*.{}".format(args.input, ext.lower()))
    gcps = glob.glob("{}/*.txt".format(args.input))
elif os.path.exists(args.input):
    _, ext = os.path.splitext(args.input)
    if ext.lower() == ".txt":
        gcps = [args.input]
    else:
        files = [args.input]
else:
    die("{} does not exist".format(args.input))

# The output is treated as a directory when there are multiple inputs or
# the user gave a trailing slash; otherwise as a single output file.
create_dir = len(files) > 1 or args.output.endswith("/") or len(gcps) > 1

# Refuse to clobber an existing output unless --force was given.
if create_dir and os.path.isdir(args.output):
    if not args.force:
        die("{} exists, pass --force to overwrite results".format(args.output))
    else:
        shutil.rmtree(args.output)
elif not create_dir and os.path.isfile(args.output):
    if not args.force:
        die("{} exists, pass --force to overwrite results".format(args.output))
    else:
        os.remove(args.output)

if create_dir:
    os.makedirs(args.output)
# One worker thread per CPU. Threads (not processes) are sufficient here:
# the work is dominated by file I/O and C-level image code.
pool = ThreadPool(processes=multiprocessing.cpu_count())

def resize(file):
    # Dispatch on extension: .txt files are GCP lists, everything else is
    # treated as an image. When there is a single input, the output path is
    # the destination file itself (not create_dir).
    _, ext = os.path.splitext(file)
    if ext.lower() == ".txt":
        return resize_gcp(file, args.output, args.amount, not create_dir)
    else:
        return resize_image(file, args.output, args.amount, not create_dir)

pool.map(resize, files + gcps)

print("Process completed, {} errors.".format(nonloc.errors))

View File

@ -0,0 +1,25 @@
#!/bin/bash
# GPL2 jmenezes ODM extract exif lon/lat project to utm with proj4; 2017-05-28
# line 23 tab bugfix 2017-07-11
# apt-get install exiftool geotiff-bin
#
# Usage (run inside the project's /images/ directory):
#   <script> zone [S|N] > camera_wgs84utm.txt
if [ $# -lt 2 ]; then
    echo "run inside /images/ directory" 1>&2
    echo "$(basename "$0") zone [S|N] > camera_wgs84utm.txt" 1>&2
    exit 1
fi

Z=$1

# Derive the EPSG code and the proj hemisphere flag from argument 2.
case $2 in
    s|S) printf "EPSG:327%02d\n" "$Z"; H=south
    ;;
    n|N) printf "EPSG:326%02d\n" "$Z"; H=north
    ;;
    *)
        # Fix: previously this case fell through silently with $H unset,
        # producing a broken proj invocation below.
        echo "hemisphere must be S or N" 1>&2
        exit 1
    ;;
esac

# Fix: the original glob '*[jpg,JPG,tif,TIF]' is a bracket expression
# (a single-character class) — it matched any filename ending in one of
# those letters or a comma, not the intended extensions.
for i in *.jpg *.JPG *.tif *.TIF; do
    [ -e "$i" ] || continue  # skip literal patterns when nothing matched
    exiftool "$i" | grep GPS | grep Position | \
    awk -F \: -v img="$i" '{ print $2","img }' | tr -d '[:blank:]' | \
    sed s/deg/d/g | tr \, \\t | awk '{ print $2,$1,$3 }' | \
    proj -f "%.3f" +proj=utm +zone="$Z" +$H +ellps=WGS84
done | sed s/\ /\\t/g
exit 0

View File

@ -0,0 +1,31 @@
# Visible Vegetation Indexes
This script produces a Vegetation Index raster from a RGB orthophoto (odm_orthophoto.tif in your project)
## Requirements
* rasterio (pip install rasterio)
* numpy python package (included in ODM build)
## Usage
```
vegind.py <orthophoto.tif> index
positional arguments:
<orthophoto.tif> The RGB orthophoto. Must be a GeoTiff.
index Index identifier. Allowed values: ngrdi, tgi, vari
```
Output will be generated with index suffix in the same directory as input.
## Examples
`python vegind.py /path/to/odm_orthophoto.tif tgi`
Orthophoto of a Koniaków grass field and forest in QGIS: ![](http://imgur.com/K6x3nB2.jpg)
The Triangular Greenness Index output in QGIS (with a spectral pseudocolor): ![](http://i.imgur.com/f9TzISU.jpg)
Visible Atmospheric Resistant Index: ![](http://imgur.com/Y7BHzLs.jpg)
Normalized green-red difference index: ![](http://imgur.com/v8cmaPS.jpg)
## Bibliography
1. Hunt, E. Raymond, et al. "A Visible Band Index for Remote Sensing Leaf Chlorophyll Content At the Canopy Scale." ITC journal 21(2013): 103-112. doi: 10.1016/j.jag.2012.07.020
(https://doi.org/10.1016/j.jag.2012.07.020)

View File

@ -0,0 +1,95 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
import rasterio, os, sys
import numpy as np
class bcolors:
    """ANSI escape sequences used for colored terminal output."""
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'  # reset all attributes
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
# Parse positional CLI arguments: input orthophoto path and index name.
# NOTE(review): this script is Python 2 (print statements).
try:
    file = sys.argv[1]
    typ = sys.argv[2]
    (fileRoot, fileExt) = os.path.splitext(file)
    # Output goes next to the input, e.g. ortho.tif -> ortho_tgi.tif
    outFileName = fileRoot + "_" + typ + fileExt
    if typ not in ['vari', 'tgi', 'ngrdi']:
        raise IndexError
except (TypeError, IndexError, NameError):
    print bcolors.FAIL + 'Arguments messed up. Check arguments order and index name' + bcolors.ENDC
    print 'Usage: ./vegind.py orto index'
    print ' orto - filepath to RGB orthophoto'
    print ' index - Vegetation Index'
    print bcolors.OKGREEN + 'Available indexes: vari, ngrdi, tgi' + bcolors.ENDC
    sys.exit()
def calcNgrdi(red, green):
    """Normalized green-red difference index (NGRDI).

    Tucker, C.J., 1979. Red and photographic infrared linear combinations
    for monitoring vegetation. Remote Sensing of Environment 8, 127-150.

    :param red: red visible channel
    :param green: green visible channel
    :return: NGRDI array; -9999.0 (nodata) where red + green == 0
    """
    total = np.add(red, green)
    valid = np.not_equal(total, 0.0)
    index = np.true_divide(np.subtract(green, red), total)
    # Where the denominator is zero, substitute the nodata value.
    return np.choose(valid, (-9999.0, index))
def calcVari(red, green, blue):
    """Visible Atmospherically Resistant Index (VARI).

    Gitelson, A.A., Kaufman, Y.J., Stark, R., Rundquist, D., 2002.
    Novel algorithms for remote estimation of vegetation fraction.
    Remote Sensing of Environment 80, 76-87.

    :param red: red visible channel
    :param green: green visible channel
    :param blue: blue visible channel
    :return: VARI array; -9999.0 (nodata) where green + red - blue == 0
    """
    denominator = np.subtract(np.add(green, red), blue)
    valid = np.not_equal(denominator, 0.0)
    index = np.true_divide(np.subtract(green, red), denominator)
    # Where the denominator is zero, substitute the nodata value.
    return np.choose(valid, (-9999.0, index))
def calcTgi(red, green, blue):
    """Triangular Greenness Index (TGI): green - 0.39*red - 0.61*blue.

    Hunt, E. Raymond Jr.; Doraiswamy, Paul C.; McMurtrey, James E.;
    Daughtry, Craig S.T.; Perry, Eileen M.; and Akhmedov, Bakhyt,
    A visible band index for remote sensing leaf chlorophyll content at
    the canopy scale (2013). Publications from USDA-ARS / UNL Faculty.
    Paper 1156. http://digitalcommons.unl.edu/usdaarsfacpub/1156

    :param red: red channel
    :param green: green channel
    :param blue: blue channel
    :return: TGI array; -9999.0 (nodata) where green - red + blue == 255
    """
    mask = np.not_equal(green - red + blue - 255.0, 0.0)
    # Fix: the original called np.subtract(green, 0.39*red, 0.61*blue),
    # but the *third positional argument* of a ufunc is the `out` buffer —
    # the -0.61*blue term was silently discarded and the result was just
    # green - 0.39*red. Compute the full TGI expression instead.
    tgi = np.subtract(np.subtract(green, np.multiply(0.39, red)),
                      np.multiply(0.61, blue))
    return np.choose(mask, (-9999.0, tgi))
# Read the three RGB bands, compute the requested index, and write a
# single-band float32 GeoTIFF (nodata = -9999) next to the input file.
try:
    with rasterio.Env():
        ds = rasterio.open(file)
        profile = ds.profile
        # Output profile: one float32 band with an explicit nodata value.
        profile.update(dtype=rasterio.float32, count=1, nodata=-9999)
        red = np.float32(ds.read(1))
        green = np.float32(ds.read(2))
        blue = np.float32(ds.read(3))
        # The index functions divide across the whole array (including
        # masked pixels); silence the resulting divide/invalid warnings.
        np.seterr(divide='ignore', invalid='ignore')
        if typ == 'ngrdi':
            indeks = calcNgrdi(red,green)
        elif typ == 'vari':
            indeks = calcVari(red, green, blue)
        elif typ == 'tgi':
            indeks = calcTgi(red, green, blue)
        with rasterio.open(outFileName, 'w', BIGTIFF="IF_SAFER", **profile) as dst:
            dst.write(indeks.astype(rasterio.float32), 1)
except rasterio.errors.RasterioIOError:
    print bcolors.FAIL + 'Orthophoto file not found or access denied' + bcolors.ENDC
    sys.exit()

View File

@ -0,0 +1,3 @@
The g++ and gcc scripts in this directory are used to replace the real g++ and gcc programs so that compilation across all projects (including dependencies) uses the -march=nehalem flag, which allows us to build a docker image compatible with most Intel based CPUs.
Without the -march=nehalem flag, a docker image will contain binaries that are optimized for the machine that built the image, and will not run on older machines.

17
o/ODM/ODM-2.8.7/docker/g++ Executable file
View File

@ -0,0 +1,17 @@
#!/bin/bash
# Wrapper around the real g++: strip any caller-supplied -march flags and
# force a portable baseline architecture so built binaries run on older
# CPUs (see docker/README).
filtered=()
for arg in "$@"; do
    if [[ "$arg" == -march* ]]; then
        continue
    fi
    filtered+=("$arg")
done

# Default to a conservative x86-64 baseline; use the ARMv8 baseline when
# building on aarch64.
ARCH=nehalem
if [[ $(uname -m) == "aarch64" ]]; then
    ARCH=armv8-a
fi

/usr/bin/g++_real -march=$ARCH "${filtered[@]}"

17
o/ODM/ODM-2.8.7/docker/gcc Executable file
View File

@ -0,0 +1,17 @@
#!/bin/bash
# Wrapper around the real gcc: strip any caller-supplied -march flags and
# force a portable baseline architecture so built binaries run on older
# CPUs (see docker/README).
filtered=()
for arg in "$@"; do
    if [[ "$arg" == -march* ]]; then
        continue
    fi
    filtered+=("$arg")
done

# Default to a conservative x86-64 baseline; use the ARMv8 baseline when
# building on aarch64.
ARCH=nehalem
if [[ $(uname -m) == "aarch64" ]]; then
    ARCH=armv8-a
fi

/usr/bin/gcc_real -march=$ARCH "${filtered[@]}"

View File

@ -0,0 +1,26 @@
****************************************
PLEASE REMOVE THIS NOTE AFTER READING IT!
First of all, thank you for taking the time to report an issue.
Before you continue, make sure you are in the right place. Please open an issue only to report faults and bugs. For questions and discussion please open a topic on http://community.opendronemap.org/c/opendronemap.
Please use the format below to report bugs and faults.
****************************************
### How did you install ODM? (Docker, installer, natively, ...)?
[Type answer here]
### What is the problem?
[Type answer here]
### What should be the expected behavior? If this is a feature request, please describe in detail the changes you think should be made to the code, citing files and lines where changes should be made, if possible.
[Type answer here]
### How can we reproduce this? What steps did you do to trigger the problem? If this is an issue with processing a dataset, YOU MUST include a copy of your dataset uploaded on Google Drive or Dropbox (otherwise we cannot reproduce this).
[Type answer here]

View File

@ -0,0 +1,49 @@
# Build stage: compiles ODM and its SuperBuild dependencies. The CUDA
# *devel* image provides nvcc and headers needed for the GPU build.
FROM nvidia/cuda:11.2.0-devel-ubuntu20.04 AS builder

# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

# Prepare directories
WORKDIR /code

# Copy everything
COPY . ./

# Run the build
RUN PORTABLE_INSTALL=YES GPU_INSTALL=YES bash configure.sh install

# Clean Superbuild (drops intermediate build artifacts so they are not
# copied into the runtime stage below)
RUN bash configure.sh clean

### END Builder

### Use a second image for the final asset to reduce the number and
# size of the layers.
FROM nvidia/cuda:11.2.0-runtime-ubuntu20.04
#FROM nvidia/cuda:11.2.0-devel-ubuntu20.04

# Env variables
ENV DEBIAN_FRONTEND=noninteractive \
    PYTHONPATH="$PYTHONPATH:/code/SuperBuild/install/lib/python3.9/dist-packages:/code/SuperBuild/install/lib/python3.8/dist-packages:/code/SuperBuild/install/bin/opensfm" \
    LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/code/SuperBuild/install/lib"

WORKDIR /code

# Copy everything we built from the builder
COPY --from=builder /code /code
# Copy the Python libraries installed via pip from the builder
COPY --from=builder /usr/local /usr/local

# Install shared libraries that we depend on via APT, but *not*
# the -dev packages to save space!
# Also run a smoke test on ODM and OpenSfM
RUN bash configure.sh installruntimedepsonly \
  && apt-get clean \
  && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* \
  && bash run.sh --help \
  && bash -c "eval $(python3 /code/opendm/context.py) && python3 -c 'from opensfm import io, pymap'"

# Entry point
ENTRYPOINT ["python3", "/code/run.py"]

View File

@ -0,0 +1,151 @@
; Script generated by the Inno Setup Script Wizard.
; SEE THE DOCUMENTATION FOR DETAILS ON CREATING INNO SETUP SCRIPT FILES!
#define MyAppName "ODM"
#define VerFile FileOpen("VERSION")
#define MyAppVersion FileRead(VerFile)
#expr FileClose(VerFile)
#undef VerFile
#define MyAppPublisher "OpenDroneMap"
#define MyAppURL "https://opendronemap.org"
[Setup]
; NOTE: The value of AppId uniquely identifies this application.
; Do not use the same AppId value in installers for other applications.
; (To generate a new GUID, click Tools | Generate GUID inside the IDE.)
AppId={{443998BA-9F8F-4A69-9A96-0D8FBC8C6393}
AppName={#MyAppName}
AppVersion={#MyAppVersion}
AppPublisher={#MyAppPublisher}
AppPublisherURL={#MyAppURL}
AppSupportURL={#MyAppURL}
AppUpdatesURL={#MyAppURL}
DefaultDirName=C:\ODM
DefaultGroupName={#MyAppName}
AllowNoIcons=yes
LicenseFile=LICENSE
OutputDir=dist
OutputBaseFilename=ODM_Setup_{#MyAppVersion}
Compression=lzma
SolidCompression=yes
ArchitecturesAllowed=x64
SignTool=signtool
PrivilegesRequired=lowest
UsePreviousAppDir=no
;SetupIconFile=setup.ico
[Languages]
Name: "english"; MessagesFile: "compiler:Default.isl"
[Files]
Source: "contrib\*"; DestDir: "{app}\contrib"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "licenses\*"; DestDir: "{app}\licenses"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "opendm\*"; DestDir: "{app}\opendm"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "stages\*"; DestDir: "{app}\stages"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "SuperBuild\install\bin\*"; DestDir: "{app}\SuperBuild\install\bin"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "venv\*"; DestDir: "{app}\venv"; Excludes: "__pycache__,pyvenv.cfg"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "python38\*"; DestDir: "{app}\python38"; Excludes: "__pycache__"; Flags: ignoreversion recursesubdirs createallsubdirs
Source: "console.bat"; DestDir: "{app}"; Flags: ignoreversion
Source: "VERSION"; DestDir: "{app}"; Flags: ignoreversion
Source: "LICENSE"; DestDir: "{app}"; Flags: ignoreversion
Source: "run.bat"; DestDir: "{app}"; Flags: ignoreversion
Source: "run.py"; DestDir: "{app}"; Flags: ignoreversion
Source: "settings.yaml"; DestDir: "{app}"; Flags: ignoreversion
Source: "win32env.bat"; DestDir: "{app}"; Flags: ignoreversion
Source: "winrun.bat"; DestDir: "{app}"; Flags: ignoreversion
Source: "SuperBuild\download\vc_redist.x64.exe"; DestDir: {tmp}; Flags: dontcopy
[Icons]
Name: {group}\ODM Console; Filename: "{app}\console.bat"; WorkingDir: "{app}"
Name: "{userdesktop}\ODM Console"; Filename: "{app}\console.bat"; WorkingDir: "{app}"; Tasks: desktopicon
[Tasks]
Name: "desktopicon"; Description: "{cm:CreateDesktopIcon}"; GroupDescription: "{cm:AdditionalIcons}"; Flags: unchecked
[Run]
Filename: "{tmp}\vc_redist.x64.exe"; StatusMsg: "Installing Visual C++ Redistributable Packages for Visual Studio 2019"; Parameters: "/quiet"; Check: VC2019RedistNeedsInstall ; Flags: waituntilterminated
Filename: "{app}\console.bat"; Description: {cm:LaunchProgram,ODM Console}; Flags: nowait postinstall skipifsilent
[Code]
// Returns True when the x64 VC++ runtime is missing or older than
// 14.14.26429.03, and in that case extracts the bundled redistributable
// so the [Run] entry can install it.
// NOTE(review): CompareStr is a lexicographic string compare, not a
// semantic version compare — confirm this is acceptable for the version
// strings written by the VC redist installer.
function VC2019RedistNeedsInstall: Boolean;
var
  Version: String;
begin
  if RegQueryStringValue(HKEY_LOCAL_MACHINE,
    'SOFTWARE\Microsoft\VisualStudio\14.0\VC\Runtimes\x64', 'Version', Version) then
  begin
    // Is the installed version at least 14.14 ?
    Log('VC Redist Version check : found ' + Version);
    Result := (CompareStr(Version, 'v14.14.26429.03')<0);
  end
  else
  begin
    // Not even an old version installed
    Result := True;
  end;
  if (Result) then
  begin
    // Make the bundled installer available in {tmp} for the [Run] section.
    ExtractTemporaryFile('vc_redist.x64.exe');
  end;
end;
// Reads the UninstallString of a previously installed copy of this app
// (same AppId) from the registry, trying HKLM first and falling back to
// HKCU. Returns '' when no previous installation is registered.
function GetUninstallString(): String;
var
  sUnInstPath: String;
  sUnInstallString: String;
begin
  sUnInstPath := ExpandConstant('Software\Microsoft\Windows\CurrentVersion\Uninstall\{#emit SetupSetting("AppId")}_is1');
  sUnInstallString := '';
  if not RegQueryStringValue(HKLM, sUnInstPath, 'UninstallString', sUnInstallString) then
    RegQueryStringValue(HKCU, sUnInstPath, 'UninstallString', sUnInstallString);
  Result := sUnInstallString;
end;
// True when a previous installation exists (i.e. an uninstall string is
// registered), meaning this run is an upgrade rather than a fresh install.
function IsUpgrade(): Boolean;
begin
  Result := (GetUninstallString() <> '');
end;
// Silently uninstalls the previously installed version, if any.
function UnInstallOldVersion(): Integer;
var
  sUnInstallString: String;
  iResultCode: Integer;
begin
  { Return Values: }
  { 1 - uninstall string is empty }
  { 2 - error executing the UnInstallString }
  { 3 - successfully executed the UnInstallString }

  { default return value }
  Result := 0;

  { get the uninstall string of the old app }
  sUnInstallString := GetUninstallString();
  if sUnInstallString <> '' then begin
    sUnInstallString := RemoveQuotes(sUnInstallString);
    { run the old uninstaller silently and wait for it to finish }
    if Exec(sUnInstallString, '/SILENT /NORESTART /SUPPRESSMSGBOXES','', SW_HIDE, ewWaitUntilTerminated, iResultCode) then
      Result := 3
    else
      Result := 2;
  end else
    Result := 1;
end;
// Setup event hook: just before files are installed, remove any existing
// installation so the new version starts from a clean directory.
procedure CurStepChanged(CurStep: TSetupStep);
begin
  if (CurStep=ssInstall) then
  begin
    if (IsUpgrade()) then
    begin
      UnInstallOldVersion();
    end;
  end;
end;
[UninstallDelete]
Type: filesandordirs; Name: "{app}\SuperBuild"
Type: filesandordirs; Name: "{app}\contrib"
Type: filesandordirs; Name: "{app}\licenses"
Type: filesandordirs; Name: "{app}\opendm"
Type: filesandordirs; Name: "{app}\stages"
Type: filesandordirs; Name: "{app}\venv"

View File

@ -0,0 +1,199 @@
This package was downloaded from
http://xorg.freedesktop.org/releases/individual/lib/
Copyright 1986, 1987, 1988, 1989, 1994, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.
Copyright (c) 1996 Digital Equipment Corporation, Maynard, Massachusetts.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
DIGITAL EQUIPMENT CORPORATION BE LIABLE FOR ANY CLAIM, DAMAGES, INCLUDING,
BUT NOT LIMITED TO CONSEQUENTIAL OR INCIDENTAL DAMAGES, OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of Digital Equipment Corporation
shall not be used in advertising or otherwise to promote the sale, use or other
dealings in this Software without prior written authorization from Digital
Equipment Corporation.
Copyright (c) 1997 by Silicon Graphics Computer Systems, Inc.
Permission to use, copy, modify, and distribute this
software and its documentation for any purpose and without
fee is hereby granted, provided that the above copyright
notice appear in all copies and that both that copyright
notice and this permission notice appear in supporting
documentation, and that the name of Silicon Graphics not be
used in advertising or publicity pertaining to distribution
of the software without specific prior written permission.
Silicon Graphics makes no representation about the suitability
of this software for any purpose. It is provided "as is"
without any express or implied warranty.
SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
THE USE OR PERFORMANCE OF THIS SOFTWARE.
Copyright 1992 Network Computing Devices
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name of NCD. not be used in advertising or
publicity pertaining to distribution of the software without specific,
written prior permission. NCD. makes no representations about the
suitability of this software for any purpose. It is provided "as is"
without express or implied warranty.
NCD. DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL NCD.
BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
Copyright 1991,1993 by Digital Equipment Corporation, Maynard, Massachusetts,
and Olivetti Research Limited, Cambridge, England.
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the names of Digital or Olivetti
not be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
DIGITAL AND OLIVETTI DISCLAIM ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THEY BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
Copyright 1986, 1987, 1988 by Hewlett-Packard Corporation
Permission to use, copy, modify, and distribute this
software and its documentation for any purpose and without
fee is hereby granted, provided that the above copyright
notice appear in all copies and that both that copyright
notice and this permission notice appear in supporting
documentation, and that the name of Hewlett-Packard not be used in
advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
Hewlett-Packard makes no representations about the
suitability of this software for any purpose. It is provided
"as is" without express or implied warranty.
This software is not subject to any license of the American
Telephone and Telegraph Company or of the Regents of the
University of California.
Copyright (c) 1994, 1995 Hewlett-Packard Company
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL HEWLETT-PACKARD COMPANY BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the Hewlett-Packard
Company shall not be used in advertising or otherwise to promote the
sale, use or other dealings in this Software without prior written
authorization from the Hewlett-Packard Company.
Copyright Digital Equipment Corporation, 1996
Permission to use, copy, modify, distribute, and sell this
documentation for any purpose is hereby granted without fee,
provided that the above copyright notice and this permission
notice appear in all copies. Digital Equipment Corporation
makes no representations about the suitability for any purpose
of the information in this document. This documentation is
provided ``as is'' without express or implied warranty.
Copyright (c) 1999, 2005, 2006, Oracle and/or its affiliates. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
Copyright (c) 1989 X Consortium, Inc. and Digital Equipment Corporation.
Copyright (c) 1992 X Consortium, Inc. and Intergraph Corporation.
Copyright (c) 1993 X Consortium, Inc. and Silicon Graphics, Inc.
Copyright (c) 1994, 1995 X Consortium, Inc. and Hewlett-Packard Company.
Permission to use, copy, modify, and distribute this documentation for
any purpose and without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
Digital Equipment Corporation, Intergraph Corporation, Silicon
Graphics, Hewlett-Packard, and the X Consortium make no
representations about the suitability for any purpose of the
information in this document. This documentation is provided ``as is''
without express or implied warranty.

View File

@ -0,0 +1,944 @@
This package was downloaded from
http://xorg.freedesktop.org/releases/individual/lib/
The following is the 'standard copyright' agreed upon by most contributors,
and is currently the canonical license preferred by the X.Org Foundation.
This is a slight variant of the common MIT license form published by the
Open Source Initiative at http://www.opensource.org/licenses/mit-license.php
Copyright holders of new code should use this license statement where
possible, and insert their name to this list. Please sort by surname
for people, and by the full name for other entities (e.g. Juliusz
Chroboczek sorts before Intel Corporation sorts before Daniel Stone).
See each individual source file or directory for the license that applies
to that file.
Copyright (C) 2003-2006,2008 Jamey Sharp, Josh Triplett
Copyright © 2009 Red Hat, Inc.
Copyright 1990-1992,1999,2000,2004,2009,2010 Oracle and/or its affiliates.
All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the next
paragraph) shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
----------------------------------------------------------------------
The following licenses are 'legacy' - usually MIT/X11 licenses with the name
of the copyright holder(s) in the license statement:
Copyright 1984-1994, 1998 The Open Group
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
OPEN GROUP BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of The Open Group shall not be
used in advertising or otherwise to promote the sale, use or other dealings
in this Software without prior written authorization from The Open Group.
X Window System is a trademark of The Open Group.
----------------------------------------
Copyright 1985, 1986, 1987, 1988, 1989, 1990, 1991, 1994, 1996 X Consortium
Copyright 2000 The XFree86 Project, Inc.
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the X Consortium shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from the X Consortium.
Copyright 1985, 1986, 1987, 1988, 1989, 1990, 1991 by
Digital Equipment Corporation
Portions Copyright 1990, 1991 by Tektronix, Inc.
Permission to use, copy, modify and distribute this documentation for
any purpose and without fee is hereby granted, provided that the above
copyright notice appears in all copies and that both that copyright notice
and this permission notice appear in all copies, and that the names of
Digital and Tektronix not be used in in advertising or publicity pertaining
to this documentation without specific, written prior permission.
Digital and Tektronix makes no representations about the suitability
of this documentation for any purpose.
It is provided ``as is'' without express or implied warranty.
----------------------------------------
Copyright (c) 1999-2000 Free Software Foundation, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
FREE SOFTWARE FOUNDATION BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of the Free Software Foundation
shall not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization from the
Free Software Foundation.
----------------------------------------
Code and supporting documentation (c) Copyright 1990 1991 Tektronix, Inc.
All Rights Reserved
This file is a component of an X Window System-specific implementation
of Xcms based on the TekColor Color Management System. TekColor is a
trademark of Tektronix, Inc. The term "TekHVC" designates a particular
color space that is the subject of U.S. Patent No. 4,985,853 (equivalent
foreign patents pending). Permission is hereby granted to use, copy,
modify, sell, and otherwise distribute this software and its
documentation for any purpose and without fee, provided that:
1. This copyright, permission, and disclaimer notice is reproduced in
all copies of this software and any modification thereof and in
supporting documentation;
2. Any color-handling application which displays TekHVC color
coordinates identifies these as TekHVC color coordinates in any
interface that displays these coordinates and in any associated
documentation;
3. The term "TekHVC" is always used, and is only used, in association
with the mathematical derivations of the TekHVC Color Space,
including those provided in this file and any equivalent pathways and
mathematical derivations, regardless of digital (e.g., floating point
or integer) representation.
Tektronix makes no representation about the suitability of this software
for any purpose. It is provided "as is" and with all faults.
TEKTRONIX DISCLAIMS ALL WARRANTIES APPLICABLE TO THIS SOFTWARE,
INCLUDING THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE. IN NO EVENT SHALL TEKTRONIX BE LIABLE FOR ANY
SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
RESULTING FROM LOSS OF USE, DATA, OR PROFITS, WHETHER IN AN ACTION OF
CONTRACT, NEGLIGENCE, OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
CONNECTION WITH THE USE OR THE PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
(c) Copyright 1995 FUJITSU LIMITED
This is source code modified by FUJITSU LIMITED under the Joint
Development Agreement for the CDE/Motif PST.
----------------------------------------
Copyright 1992 by Oki Technosystems Laboratory, Inc.
Copyright 1992 by Fuji Xerox Co., Ltd.
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of Oki Technosystems
Laboratory and Fuji Xerox not be used in advertising or publicity
pertaining to distribution of the software without specific, written
prior permission.
Oki Technosystems Laboratory and Fuji Xerox make no representations
about the suitability of this software for any purpose. It is provided
"as is" without express or implied warranty.
OKI TECHNOSYSTEMS LABORATORY AND FUJI XEROX DISCLAIM ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL OKI TECHNOSYSTEMS
LABORATORY AND FUJI XEROX BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1990, 1991, 1992, 1993, 1994 by FUJITSU LIMITED
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of FUJITSU LIMITED
not be used in advertising or publicity pertaining to distribution
of the software without specific, written prior permission.
FUJITSU LIMITED makes no representations about the suitability of
this software for any purpose.
It is provided "as is" without express or implied warranty.
FUJITSU LIMITED DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL FUJITSU LIMITED BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright (c) 1995 David E. Wexelblat. All rights reserved
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL DAVID E. WEXELBLAT BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
Except as contained in this notice, the name of David E. Wexelblat shall
not be used in advertising or otherwise to promote the sale, use or
other dealings in this Software without prior written authorization
from David E. Wexelblat.
----------------------------------------
Copyright 1990, 1991 by OMRON Corporation
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name OMRON not be used in
advertising or publicity pertaining to distribution of the software without
specific, written prior permission. OMRON makes no representations
about the suitability of this software for any purpose. It is provided
"as is" without express or implied warranty.
OMRON DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL OMRON BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTUOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1985, 1986, 1987, 1988, 1989, 1990, 1991 by
Digital Equipment Corporation
Portions Copyright 1990, 1991 by Tektronix, Inc
Rewritten for X.org by Chris Lee <clee@freedesktop.org>
Permission to use, copy, modify, distribute, and sell this documentation
for any purpose and without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.
Chris Lee makes no representations about the suitability for any purpose
of the information in this document. It is provided \`\`as-is'' without
express or implied warranty.
----------------------------------------
Copyright 1993 by Digital Equipment Corporation, Maynard, Massachusetts,
Copyright 1994 by FUJITSU LIMITED
Copyright 1994 by Sony Corporation
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the names of Digital, FUJITSU
LIMITED and Sony Corporation not be used in advertising or publicity
pertaining to distribution of the software without specific, written
prior permission.
DIGITAL, FUJITSU LIMITED AND SONY CORPORATION DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL DIGITAL, FUJITSU LIMITED
AND SONY CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1991 by the Open Software Foundation
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name of Open Software Foundation
not be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission. Open Software
Foundation makes no representations about the suitability of this
software for any purpose. It is provided "as is" without express or
implied warranty.
OPEN SOFTWARE FOUNDATION DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL OPEN SOFTWARE FOUNDATIONN BE
LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1990, 1991, 1992,1993, 1994 by FUJITSU LIMITED
Copyright 1993, 1994 by Sony Corporation
Permission to use, copy, modify, distribute, and sell this software and
its documentation for any purpose is hereby granted without fee, provided
that the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name of FUJITSU LIMITED and Sony Corporation
not be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission. FUJITSU LIMITED and
Sony Corporation makes no representations about the suitability of this
software for any purpose. It is provided "as is" without express or
implied warranty.
FUJITSU LIMITED AND SONY CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL FUJITSU LIMITED OR SONY CORPORATION BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER
RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE
USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright (c) 1993, 1995 by Silicon Graphics Computer Systems, Inc.
Permission to use, copy, modify, and distribute this
software and its documentation for any purpose and without
fee is hereby granted, provided that the above copyright
notice appear in all copies and that both that copyright
notice and this permission notice appear in supporting
documentation, and that the name of Silicon Graphics not be
used in advertising or publicity pertaining to distribution
of the software without specific prior written permission.
Silicon Graphics makes no representation about the suitability
of this software for any purpose. It is provided "as is"
without any express or implied warranty.
SILICON GRAPHICS DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS
SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT SHALL SILICON
GRAPHICS BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH
THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1991, 1992, 1993, 1994 by FUJITSU LIMITED
Copyright 1993 by Digital Equipment Corporation
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of FUJITSU LIMITED and
Digital Equipment Corporation not be used in advertising or publicity
pertaining to distribution of the software without specific, written
prior permission. FUJITSU LIMITED and Digital Equipment Corporation
makes no representations about the suitability of this software for
any purpose. It is provided "as is" without express or implied
warranty.
FUJITSU LIMITED AND DIGITAL EQUIPMENT CORPORATION DISCLAIM ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
FUJITSU LIMITED AND DIGITAL EQUIPMENT CORPORATION BE LIABLE FOR
ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF
THIS SOFTWARE.
----------------------------------------
Copyright 1992, 1993 by FUJITSU LIMITED
Copyright 1993 by Fujitsu Open Systems Solutions, Inc.
Copyright 1994 by Sony Corporation
Permission to use, copy, modify, distribute and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of FUJITSU LIMITED,
Fujitsu Open Systems Solutions, Inc. and Sony Corporation not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
FUJITSU LIMITED, Fujitsu Open Systems Solutions, Inc. and
Sony Corporation make no representations about the suitability of
this software for any purpose. It is provided "as is" without
express or implied warranty.
FUJITSU LIMITED, FUJITSU OPEN SYSTEMS SOLUTIONS, INC. AND SONY
CORPORATION DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS,
IN NO EVENT SHALL FUJITSU OPEN SYSTEMS SOLUTIONS, INC., FUJITSU LIMITED
AND SONY CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1987, 1988, 1990, 1993 by Digital Equipment Corporation,
Maynard, Massachusetts,
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of Digital not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
DIGITAL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
----------------------------------------
Copyright 1993 by SunSoft, Inc.
Copyright 1999-2000 by Bruno Haible
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the names of SunSoft, Inc. and
Bruno Haible not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior
permission. SunSoft, Inc. and Bruno Haible make no representations
about the suitability of this software for any purpose. It is
provided "as is" without express or implied warranty.
SunSoft Inc. AND Bruno Haible DISCLAIM ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS, IN NO EVENT SHALL SunSoft, Inc. OR Bruno Haible BE LIABLE
FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1991 by the Open Software Foundation
Copyright 1993 by the TOSHIBA Corp.
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the names of Open Software Foundation and TOSHIBA
not be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission. Open Software
Foundation and TOSHIBA make no representations about the suitability of this
software for any purpose. It is provided "as is" without express or
implied warranty.
OPEN SOFTWARE FOUNDATION AND TOSHIBA DISCLAIM ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL OPEN SOFTWARE FOUNDATIONN OR TOSHIBA BE
LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1988 by Wyse Technology, Inc., San Jose, Ca.,
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name Wyse not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
WYSE DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
DIGITAL BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
----------------------------------------
Copyright 1991 by the Open Software Foundation
Copyright 1993, 1994 by the Sony Corporation
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the names of Open Software Foundation and
Sony Corporation not be used in advertising or publicity pertaining to
distribution of the software without specific, written prior permission.
Open Software Foundation and Sony Corporation make no
representations about the suitability of this software for any purpose.
It is provided "as is" without express or implied warranty.
OPEN SOFTWARE FOUNDATION AND SONY CORPORATION DISCLAIM ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL OPEN
SOFTWARE FOUNDATIONN OR SONY CORPORATION BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1992, 1993 by FUJITSU LIMITED
Copyright 1993 by Fujitsu Open Systems Solutions, Inc.
Permission to use, copy, modify, distribute and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of FUJITSU LIMITED and
Fujitsu Open Systems Solutions, Inc. not be used in advertising or
publicity pertaining to distribution of the software without specific,
written prior permission.
FUJITSU LIMITED and Fujitsu Open Systems Solutions, Inc. makes no
representations about the suitability of this software for any purpose.
It is provided "as is" without express or implied warranty.
FUJITSU LIMITED AND FUJITSU OPEN SYSTEMS SOLUTIONS, INC. DISCLAIMS ALL
WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL FUJITSU OPEN SYSTEMS
SOLUTIONS, INC. AND FUJITSU LIMITED BE LIABLE FOR ANY SPECIAL, INDIRECT
OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
OF THIS SOFTWARE.
----------------------------------------
Copyright 1993, 1994 by Sony Corporation
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of Sony Corporation
not be used in advertising or publicity pertaining to distribution
of the software without specific, written prior permission.
Sony Corporation makes no representations about the suitability of
this software for any purpose. It is provided "as is" without
express or implied warranty.
SONY CORPORATION DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL SONY CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1986, 1998 The Open Group
Copyright (c) 2000 The XFree86 Project, Inc.
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation.
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
X CONSORTIUM OR THE XFREE86 PROJECT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Except as contained in this notice, the name of the X Consortium or of the
XFree86 Project shall not be used in advertising or otherwise to promote the
sale, use or other dealings in this Software without prior written
authorization from the X Consortium and the XFree86 Project.
----------------------------------------
Copyright 1990, 1991 by OMRON Corporation, NTT Software Corporation,
and Nippon Telegraph and Telephone Corporation
Copyright 1991 by the Open Software Foundation
Copyright 1993 by the FUJITSU LIMITED
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the names of OMRON, NTT Software, NTT, and
Open Software Foundation not be used in advertising or publicity
pertaining to distribution of the software without specific,
written prior permission. OMRON, NTT Software, NTT, and Open Software
Foundation make no representations about the suitability of this
software for any purpose. It is provided "as is" without express or
implied warranty.
OMRON, NTT SOFTWARE, NTT, AND OPEN SOFTWARE FOUNDATION
DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
SHALL OMRON, NTT SOFTWARE, NTT, OR OPEN SOFTWARE FOUNDATION BE
LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1988 by Wyse Technology, Inc., San Jose, Ca,
Copyright 1987 by Digital Equipment Corporation, Maynard, Massachusetts,
All Rights Reserved
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name Digital not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
DIGITAL AND WYSE DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL DIGITAL OR WYSE BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 1991, 1992 by Fuji Xerox Co., Ltd.
Copyright 1992, 1993, 1994 by FUJITSU LIMITED
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of Fuji Xerox,
FUJITSU LIMITED not be used in advertising or publicity pertaining
to distribution of the software without specific, written prior
permission. Fuji Xerox, FUJITSU LIMITED make no representations
about the suitability of this software for any purpose.
It is provided "as is" without express or implied warranty.
FUJI XEROX, FUJITSU LIMITED DISCLAIM ALL WARRANTIES WITH
REGARD TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL FUJI XEROX,
FUJITSU LIMITED BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL
DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA
OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 2006 Josh Triplett
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
----------------------------------------
(c) Copyright 1996 by Sebastien Marineau and Holger Veit
<marineau@genie.uottawa.ca>
<Holger.Veit@gmd.de>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
HOLGER VEIT BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
Except as contained in this notice, the name of Sebastien Marineau or Holger Veit
shall not be used in advertising or otherwise to promote the sale, use or other
dealings in this Software without prior written authorization from Holger Veit or
Sebastien Marineau.
----------------------------------------
Copyright 1990, 1991 by OMRON Corporation, NTT Software Corporation,
and Nippon Telegraph and Telephone Corporation
Copyright 1991 by the Open Software Foundation
Copyright 1993 by the TOSHIBA Corp.
Copyright 1993, 1994 by Sony Corporation
Copyright 1993, 1994 by the FUJITSU LIMITED
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the names of OMRON, NTT Software, NTT, Open
Software Foundation, and Sony Corporation not be used in advertising
or publicity pertaining to distribution of the software without specific,
written prior permission. OMRON, NTT Software, NTT, Open Software
Foundation, and Sony Corporation make no representations about the
suitability of this software for any purpose. It is provided "as is"
without express or implied warranty.
OMRON, NTT SOFTWARE, NTT, OPEN SOFTWARE FOUNDATION, AND SONY
CORPORATION DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT
SHALL OMRON, NTT SOFTWARE, NTT, OPEN SOFTWARE FOUNDATION, OR SONY
CORPORATION BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright 2000 by Bruno Haible
Permission to use, copy, modify, distribute, and sell this software
and its documentation for any purpose is hereby granted without fee,
provided that the above copyright notice appear in all copies and
that both that copyright notice and this permission notice appear
in supporting documentation, and that the name of Bruno Haible not
be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission. Bruno Haible
makes no representations about the suitability of this software for
any purpose. It is provided "as is" without express or implied
warranty.
Bruno Haible DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN
NO EVENT SHALL Bruno Haible BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS
OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE
OR PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright © 2003 Keith Packard
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name of Keith Packard not be used in
advertising or publicity pertaining to distribution of the software without
specific, written prior permission. Keith Packard makes no
representations about the suitability of this software for any purpose. It
is provided "as is" without express or implied warranty.
KEITH PACKARD DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
EVENT SHALL KEITH PACKARD BE LIABLE FOR ANY SPECIAL, INDIRECT OR
CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THIS SOFTWARE.
----------------------------------------
Copyright (c) 2007-2009, Troy D. Hanson
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
----------------------------------------
Copyright 1992, 1993 by TOSHIBA Corp.
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted, provided
that the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the name of TOSHIBA not be used in advertising
or publicity pertaining to distribution of the software without specific,
written prior permission. TOSHIBA make no representations about the
suitability of this software for any purpose. It is provided "as is"
without express or implied warranty.
TOSHIBA DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO EVENT SHALL
TOSHIBA BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
----------------------------------------
Copyright IBM Corporation 1993
All Rights Reserved
License to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation, and that the name of IBM not be
used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission.
IBM DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE, INCLUDING
ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS, AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS, IN NO EVENT SHALL
IBM BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR
ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
SOFTWARE.
----------------------------------------
Copyright 1990, 1991 by OMRON Corporation, NTT Software Corporation,
and Nippon Telegraph and Telephone Corporation
Permission to use, copy, modify, distribute, and sell this software and its
documentation for any purpose is hereby granted without fee, provided that
the above copyright notice appear in all copies and that both that
copyright notice and this permission notice appear in supporting
documentation, and that the names of OMRON, NTT Software, and NTT
not be used in advertising or publicity pertaining to distribution of the
software without specific, written prior permission. OMRON, NTT Software,
and NTT make no representations about the suitability of this
software for any purpose. It is provided "as is" without express or
implied warranty.
OMRON, NTT SOFTWARE, AND NTT, DISCLAIM ALL WARRANTIES WITH REGARD
TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS, IN NO EVENT SHALL OMRON, NTT SOFTWARE, OR NTT, BE
LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

View File

@ -0,0 +1,24 @@
Licensing for portions of OpenDroneMap are as follows:
* ImageMagick - Apache 2.0 - http://www.imagemagick.org/script/license.php
* Jhead - None - http://www.sentex.net/~mwandel/jhead/
* libjpeg - GPLv2 - http://sourceforge.net/projects/libjpeg/
* Boost - Boost Software License, Version 1.0 - http://www.boost.org/LICENSE_1_0.txt
* libgsl0 - GPL - http://www.gnu.org/software/gsl/
* liblapack - Modified BSD - http://www.netlib.org/lapack/LICENSE.txt
* Flann - BSD2 - http://opensource.org/licenses/bsd-license.php
* libzip - BSD - http://www.nih.at/libzip/LICENSE.html
* libcv - BSD - http://opencv.org/license.html
* libcvaux - BSD - http://opencv.org/license.html
* bundler - GPLv3 - http://www.gnu.org/copyleft/gpl.html
* parallel - GPLv3 - http://www.gnu.org/copyleft/gpl.html
* PoissonRecon - BSD - http://www.cs.jhu.edu/~misha/Code/PoissonRecon/license.txt
* vlfeat - BSD - http://www.vlfeat.org/license.html
* graclus - GPLv3 - http://www.gnu.org/copyleft/gpl.html
* PROJ.4 - MIT - http://trac.osgeo.org/proj/wiki/WikiStart#License
* PCL - BSD - http://pointclouds.org
* Flann - BSD2 - http://opensource.org/licenses/bsd-license.php
* Eigen - MPL2 - http://www.mozilla.org/MPL/2.0
* Qhull - http://www.qhull.org/COPYING.txt
* libext - https://github.com/OpenDroneMap/OpenDroneMap/blob/gh-pages/licenses/libext_copyright.txt
* libx11 - https://github.com/OpenDroneMap/OpenDroneMap/blob/gh-pages/licenses/libx11_copyright.txt
* MVS Texturing - BSD - https://github.com/nmoehrle/mvs-texturing/blob/master/LICENSE.txt

View File

@ -0,0 +1,25 @@
#ODM 0.3.1 under Debian 8.10 jessie - 2018-02-27 by yjmenezes
#https://github.com/OpenDroneMap/OpenDroneMap/wiki/Docker
#git clone https://github.com/OpenDroneMap/OpenDroneMap.git
git clone https://github.com/yjmenezes/OpenDroneMap.git
cd OpenDroneMap
# list images
docker images
#remove old my_odm_image if necessary
docker rmi my_odm_image
#build a fresh one using instructions from ./Dockerfile
docker build -t my_odm_image .
#run tests with supplied image set.
#Mapped host directories for output. -v host_path:container_path
cd tests/test_data/
sudo rm -r odm_* opensfm
docker run -it --rm \
-v $(pwd)/gcp_list.txt:/code/gcp_list.txt \
-v $(pwd)/images:/code/images \
-v $(pwd)/opensfm:/code/opensfm \
-v $(pwd)/odm_meshing:/code/odm_meshing \
-v $(pwd)/odm_georeferencing:/code/odm_georeferencing \
-v $(pwd)/odm_orthophoto:/code/odm_orthophoto \
-v $(pwd)/odm_texturing:/code/odm_texturing \
my_odm_image --mesh-size 100000

View File

View File

@ -0,0 +1,121 @@
import fiona
import fiona.crs
import os
import io
import json
from opendm import system
from pyproj import CRS
from opendm.location import transformer
from opendm.utils import double_quote
from osgeo import ogr
from opendm.shots import get_origin
def compute_boundary_from_shots(reconstruction_json, buffer=0, reconstruction_offset=(0, 0)):
    """
    Compute a convex-hull boundary polygon from the camera shot origins
    stored in an OpenSfM reconstruction.json file.

    :param reconstruction_json: path to OpenSfM's reconstruction.json
    :param buffer: distance (reconstruction units) by which to buffer the hull
    :param reconstruction_offset: (x, y) offset added to each shot origin
    :return: boundary coordinates (see load_boundary), or None when fewer
             than 3 usable shots are available
    :raises IOError: if reconstruction_json does not exist
    """
    if not os.path.isfile(reconstruction_json):
        raise IOError(reconstruction_json + " does not exist.")

    with open(reconstruction_json) as f:
        data = json.load(f)

    # Only the first reconstruction is considered
    reconstruction = data[0]

    mp = ogr.Geometry(ogr.wkbMultiPoint)

    for shot_image in reconstruction['shots']:
        shot = reconstruction['shots'][shot_image]
        # Skip shots without a valid GPS dilution of precision.
        # (Fix: removed dead lookup of reconstruction['cameras'][shot['camera']],
        # whose result was never used.)
        if shot.get('gps_dop', 999999) < 999999:
            p = ogr.Geometry(ogr.wkbPoint)
            origin = get_origin(shot)
            p.AddPoint_2D(origin[0] + reconstruction_offset[0], origin[1] + reconstruction_offset[1])
            mp.AddGeometry(p)

    # A polygonal hull needs at least 3 points
    if mp.GetGeometryCount() < 3:
        return None

    convexhull = mp.ConvexHull()
    boundary = convexhull.Buffer(buffer)

    return load_boundary(boundary.ExportToJson())
def load_boundary(boundary_json, reproject_to_proj4=None):
    """
    Load a boundary polygon from GeoJSON.

    :param boundary_json: GeoJSON content as a str, or a dict that will be
        serialized to JSON first
    :param reproject_to_proj4: optional proj4 string; when given, coordinates
        are reprojected from the source CRS into this CRS
    :return: list of (x, y[, z]) coordinates of the polygon's first ring
    :raises IOError: if the input is not a single, non-empty Polygon feature
    """
    if not isinstance(boundary_json, str):
        boundary_json = json.dumps(boundary_json)
    # fiona reads the GeoJSON from an in-memory bytes buffer
    with fiona.open(io.BytesIO(boundary_json.encode('utf-8')), 'r') as src:
        if len(src) != 1:
            raise IOError("Boundary must have a single polygon (found: %s)" % len(src))
        geom = src[0]['geometry']
        if geom['type'] != 'Polygon':
            raise IOError("Boundary must have a polygon feature (found: %s)" % geom['type'])
        rings = geom['coordinates']
        if len(rings) == 0:
            raise IOError("Boundary geometry has no rings")
        # Only the first (exterior) ring is used; holes are ignored
        coords = rings[0]
        if len(coords) == 0:
            raise IOError("Boundary geometry has no coordinates")
        # 2 for (x, y) or 3 for (x, y, z); preserved after reprojection
        dimensions = len(coords[0])
        if reproject_to_proj4 is not None:
            t = transformer(CRS.from_proj4(fiona.crs.to_string(src.crs)),
                            CRS.from_proj4(reproject_to_proj4))
            # Truncate transformed points back to the input dimensionality
            coords = [t.TransformPoint(*c)[:dimensions] for c in coords]
        return coords
def boundary_offset(boundary, reconstruction_offset):
    """Translate boundary coordinates by subtracting the reconstruction offset.

    Z values (when present) are passed through unchanged. Returns the input
    as-is when either argument is None.
    """
    if boundary is None or reconstruction_offset is None:
        return boundary

    off_x, off_y = reconstruction_offset[0], reconstruction_offset[1]
    is_2d = len(boundary[0]) == 2

    shifted = []
    for pt in boundary:
        moved = (pt[0] - off_x, pt[1] - off_y)
        shifted.append(moved if is_2d else moved + (pt[2],))
    return shifted
def as_polygon(boundary):
    """Serialize a list of coordinates to a WKT POLYGON string (None-safe)."""
    if boundary is None:
        return None
    points = ", ".join(" ".join(str(v) for v in coord) for coord in boundary)
    return "POLYGON((" + points + "))"
def as_geojson(boundary):
    """Serialize boundary coordinates to a single-feature GeoJSON FeatureCollection string."""
    coords_repr = str([list(c) for c in boundary])
    return '{"type":"FeatureCollection","features":[{"type":"Feature","properties":{},"geometry":{"type":"Polygon","coordinates":[%s]}}]}' % coords_repr
def export_to_bounds_files(boundary, proj4, bounds_json_file, bounds_gpkg_file):
    """Write a boundary polygon to a GeoJSON file and a GeoPackage file.

    The GeoJSON is written directly; the GeoPackage is then produced from it
    by invoking ogr2ogr with the given proj4 CRS assigned.
    """
    feature_collection = {
        "type": "FeatureCollection",
        "name": "bounds",
        "features": [{
            "type": "Feature",
            "properties": {},
            "geometry": {
                "type": "Polygon",
                "coordinates": [boundary]
            }
        }]
    }
    with open(bounds_json_file, "w") as f:
        f.write(json.dumps(feature_collection))

    # Regenerate the GPKG from scratch to avoid stale layers
    if os.path.isfile(bounds_gpkg_file):
        os.remove(bounds_gpkg_file)

    system.run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(
        proj4=proj4,
        input=double_quote(bounds_json_file),
        output=double_quote(bounds_gpkg_file)
    ))

View File

@ -0,0 +1,72 @@
import os, json
from opendm import log
def get_cameras_from_opensfm(reconstruction_file):
    """
    Extract the camera models from OpenSfM's reconstruction.json.

    Camera IDs are normalized by stripping OpenSfM's "v2 " prefix, and any
    "*_prior" calibration keys are removed from each camera entry.

    :param reconstruction_file: path to reconstruction.json
    :return: dict mapping normalized camera ID -> camera parameters
    :raises RuntimeError: if reconstruction_file does not exist
    """
    # Guard clause instead of wrapping the whole body in the if-branch
    if not os.path.exists(reconstruction_file):
        raise RuntimeError("%s does not exist." % reconstruction_file)

    with open(reconstruction_file, 'r') as fin:
        # json.load is the idiomatic form of json.loads(fin.read())
        reconstructions = json.load(fin)

    result = {}
    for recon in reconstructions:
        for camera_id, camera in recon.get('cameras', {}).items():
            # Strip "v2" prefix from OpenSfM camera IDs
            new_camera_id = camera_id
            if new_camera_id.startswith("v2 "):
                new_camera_id = new_camera_id[3:]

            # Drop "_prior" keys; build a fresh dict instead of popping from
            # the parsed data so the original structure is left untouched.
            result[new_camera_id] = {k: v for k, v in camera.items()
                                     if not k.endswith('_prior')}

    return result
def get_opensfm_camera_models(cameras):
    """
    Convert cameras to a format OpenSfM can understand
    (opposite of get_cameras_from_opensfm).

    Camera dicts are updated in place: missing "_prior" fields are mirrored
    from their base fields, and unrecognized keys are removed (with a warning).
    """
    if not isinstance(cameras, dict):
        raise RuntimeError("Invalid cameras format: %s. Expected dict." % str(cameras))

    prior_fields = ["focal","focal_x","focal_y","c_x","c_y","k1","k2","p1","p2","k3"]
    valid_fields = ["id","width","height","projection_type"] + prior_fields + [f + "_prior" for f in prior_fields]

    result = {}
    for camera_id, camera in cameras.items():
        # Quick sanity check on the ID format
        if len(camera_id.split(" ")) < 6:
            raise RuntimeError("Invalid cameraID: %s" % camera_id)

        # Ensure the "v2 " prefix OpenSfM expects
        osfm_camera_id = camera_id if camera_id.startswith("v2 ") else "v2 " + camera_id

        # Mirror each prior field into its "_prior" counterpart when missing
        for param in list(camera.keys()):
            if param in prior_fields and param + "_prior" not in camera:
                camera[param + "_prior"] = camera[param]

        # Strip any keys OpenSfM would not recognize
        for k in list(camera.keys()):
            if k not in valid_fields:
                camera.pop(k)
                log.ODM_WARNING("Invalid camera key ignored: %s" % k)

        result[osfm_camera_id] = camera

    return result

View File

@ -0,0 +1,66 @@
import os
import shutil
from opendm import system
from opendm.concurrency import get_max_memory
from opendm import io
from opendm import log
def convert_to_cogeo(src_path, blocksize=256, max_workers=1, compression="DEFLATE"):
    """
    Guarantee that the .tif passed as an argument is a Cloud Optimized GeoTIFF (cogeo)

    The file is destructively converted into a cogeo.
    If the file cannot be converted, the function does not change the file.

    :param src_path: path to GeoTIFF
    :param blocksize: internal tile size for the COG
    :param max_workers: GDAL worker threads (falsy value means ALL_CPUS)
    :param compression: compression algorithm; PREDICTOR=2 is used for LZW/DEFLATE
    :return: True on success, False otherwise
    """
    if not os.path.isfile(src_path):
        # Bug fix: this was `logger.warning(...)`, but no `logger` name exists
        # in this module (it imports `log`), so the missing-file path raised
        # NameError instead of warning and returning False.
        log.ODM_WARNING("Cannot convert to cogeo: %s (file does not exist)" % src_path)
        return False

    log.ODM_INFO("Optimizing %s as Cloud Optimized GeoTIFF" % src_path)

    # Convert into a temp file, then swap into place, so a failed conversion
    # never destroys the source file.
    tmpfile = io.related_file_path(src_path, postfix='_cogeo')
    swapfile = io.related_file_path(src_path, postfix='_cogeo_swap')

    kwargs = {
        'threads': max_workers if max_workers else 'ALL_CPUS',
        'blocksize': blocksize,
        'max_memory': get_max_memory(),
        'src_path': src_path,
        'tmpfile': tmpfile,
        'compress': compression,
        'predictor': '2' if compression in ['LZW', 'DEFLATE'] else '1',
    }

    try:
        system.run("gdal_translate "
                   "-of COG "
                   "-co NUM_THREADS={threads} "
                   "-co BLOCKSIZE={blocksize} "
                   "-co COMPRESS={compress} "
                   "-co PREDICTOR={predictor} "
                   "-co BIGTIFF=IF_SAFER "
                   "-co RESAMPLING=NEAREST "
                   "--config GDAL_CACHEMAX {max_memory}% "
                   "--config GDAL_NUM_THREADS {threads} "
                   "\"{src_path}\" \"{tmpfile}\" ".format(**kwargs))
    except Exception as e:
        log.ODM_WARNING("Cannot create Cloud Optimized GeoTIFF: %s" % str(e))

    if os.path.isfile(tmpfile):
        shutil.move(src_path, swapfile)  # Move original to swap location
        try:
            shutil.move(tmpfile, src_path)
        except IOError as e:
            log.ODM_WARNING("Cannot move %s to %s: %s" % (tmpfile, src_path, str(e)))
            shutil.move(swapfile, src_path)  # Attempt to restore the original
        if os.path.isfile(swapfile):
            os.remove(swapfile)
        return True
    else:
        return False

View File

@ -0,0 +1,100 @@
from vmem import virtual_memory
import os
import sys
try:
import Queue as queue
except:
import queue
import threading
import time
from opendm import log
def get_max_memory(minimum=5, use_at_most=0.5):
    """Return the percentage of total memory that may be used (75 = 75%).

    Computes the currently-free memory percentage, scales it by
    *use_at_most*, and never returns less than *minimum*.
    """
    free_percent = 100 - virtual_memory().percent
    return max(minimum, free_percent * use_at_most)
def get_max_memory_mb(minimum=100, use_at_most=0.5):
    """Return the amount of memory (in megabytes) that may be used.

    Scales the currently-available memory by *use_at_most*, never
    returning less than *minimum*.
    """
    available_mb = virtual_memory().available / 1024 / 1024
    return max(minimum, available_mb * use_at_most)
def parallel_map(func, items, max_workers=1, single_thread_fallback=True):
    """
    Our own implementation for parallel processing
    which handles gracefully CTRL+C and reverts to
    single thread processing in case of errors
    :param items list of objects
    :param func function to execute on each object

    NOTE(review): items must support .copy() (each queued item is copied),
    and any exception in a worker causes ALL items to be reprocessed on a
    single thread when single_thread_fallback is True — including items that
    already succeeded. Presumably callers' func is idempotent; verify.
    """
    # Module-level global used to signal a worker failure to the main thread
    # and to the other workers (checked after each queue get).
    global error
    error = None

    def process_one(q):
        func(q)

    def worker():
        global error
        while True:
            # Items are (priority, payload); payload None is the stop sentinel
            (num, q) = pq.get()
            if q is None or error is not None:
                pq.task_done()
                break
            try:
                process_one(q)
            except Exception as e:
                error = e
            finally:
                pq.task_done()

    if max_workers > 1:
        use_single_thread = False
        pq = queue.PriorityQueue()
        threads = []
        for i in range(max_workers):
            t = threading.Thread(target=worker)
            t.start()
            threads.append(t)

        # Enqueue items with increasing priority starting at 1; copies are
        # queued so workers never share the caller's objects.
        i = 1
        for t in items:
            pq.put((i, t.copy()))
            i += 1

        def stop_workers():
            # Priority -1 sorts before all real items, so sentinels are
            # delivered promptly even if work remains queued.
            for i in range(len(threads)):
                pq.put((-1, None))
            for t in threads:
                t.join()

        # block until all tasks are done
        # NOTE(review): unfinished_tasks is an internal attribute of
        # queue.Queue (not public API); polled here to keep the main thread
        # interruptible by CTRL+C.
        try:
            while pq.unfinished_tasks > 0:
                time.sleep(0.5)
        except KeyboardInterrupt:
            print("CTRL+C terminating...")
            stop_workers()
            sys.exit(1)

        stop_workers()

        if error is not None and single_thread_fallback:
            # Try to reprocess using a single thread
            # in case this was a memory error
            log.ODM_WARNING("Failed to run process in parallel, retrying with a single thread...")
            use_single_thread = True
    else:
        use_single_thread = True

    if use_single_thread:
        # Boring, single thread processing
        for q in items:
            process_one(q)

830
o/ODM/ODM-2.8.7/opendm/config.py Executable file
View File

@ -0,0 +1,830 @@
import argparse
import json
from opendm import context
from opendm import io
from opendm import log
from appsettings import SettingsParser
from pyodm import Node, exceptions
import os
import sys
# parse arguments
# Ordered list of pipeline stages; also the set of valid values for
# --end-with, --rerun and --rerun-from.
processopts = ['dataset', 'split', 'merge', 'opensfm', 'openmvs', 'odm_filterpoints',
               'odm_meshing', 'mvs_texturing', 'odm_georeferencing',
               'odm_dem', 'odm_orthophoto', 'odm_report', 'odm_postprocess']

# Read the ODM version string from the VERSION file at the project root.
with open(os.path.join(context.root_path, 'VERSION')) as version_file:
    __version__ = version_file.read().strip()
def path_or_json_string(string):
    """argparse type: accept either a file path or an inline JSON string
    and return the parsed dict; invalid input becomes ArgumentTypeError."""
    try:
        return io.path_or_json_string_to_dict(string)
    except ValueError as err:
        raise argparse.ArgumentTypeError("{0}".format(str(err)))
# Django URL validation regex
def url_string(string):
    """argparse type: validate that *string* is an http(s)/ftp(s) URL
    (Django's URL validation regex) and return it unchanged."""
    import re
    pattern = re.compile(
        r'^(?:http|ftp)s?://' # http:// or https://
        r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.?)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' #domain...
        r'localhost|' #localhost...
        r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
        r'(?::\d+)?' # optional port
        r'(?:/?|[/?]\S+)$', re.IGNORECASE)

    if pattern.match(string) is None:
        raise argparse.ArgumentTypeError("%s is not a valid URL. The URL must be in the format: http(s)://host[:port]/[?token=]" % string)
    return string
class RerunFrom(argparse.Action):
    """argparse action: store the tail of `processopts` beginning at the
    given stage, and flag the option as explicitly set."""
    def __call__(self, parser, namespace, values, option_string=None):
        start = processopts.index(values)
        setattr(namespace, self.dest, processopts[start:])
        setattr(namespace, self.dest + '_is_set', True)
class StoreTrue(argparse.Action):
    """argparse action: set the destination to True and record that the
    option was explicitly provided via a companion `<dest>_is_set` flag."""
    def __call__(self, parser, namespace, values, option_string=None):
        for attr in (self.dest, self.dest + '_is_set'):
            setattr(namespace, attr, True)
class StoreValue(argparse.Action):
    """argparse action: store the parsed value and record that the option
    was explicitly provided via a companion `<dest>_is_set` flag."""
    def __call__(self, parser, namespace, values, option_string=None):
        setattr(namespace, self.dest + '_is_set', True)
        setattr(namespace, self.dest, values)
args = None
def config(argv=None, parser=None):
global args
if args is not None and argv is None:
return args
if sys.platform == 'win32':
usage_bin = 'run'
else:
usage_bin = 'run.sh'
if parser is None:
parser = SettingsParser(description='ODM is a command line toolkit to generate maps, point clouds, 3D models and DEMs from drone, balloon or kite images.',
usage='%s [options] <dataset name>' % usage_bin,
yaml_file=open(context.settings_path))
parser.add_argument('--project-path',
metavar='<path>',
action=StoreValue,
help='Path to the project folder. Your project folder should contain subfolders for each dataset. Each dataset should have an "images" folder.')
parser.add_argument('name',
metavar='<dataset name>',
action=StoreValue,
type=str,
default='code',
nargs='?',
help='Name of dataset (i.e subfolder name within project folder). Default: %(default)s')
parser.add_argument('--resize-to',
metavar='<integer>',
action=StoreValue,
default=2048,
type=int,
help='Legacy option (use --feature-quality instead). Resizes images by the largest side for feature extraction purposes only. '
'Set to -1 to disable. This does not affect the final orthophoto '
'resolution quality and will not resize the original images. Default: %(default)s')
parser.add_argument('--end-with', '-e',
metavar='<string>',
action=StoreValue,
default='odm_postprocess',
choices=processopts,
help='End processing at this stage. Can be one of: %(choices)s. Default: %(default)s')
rerun = parser.add_mutually_exclusive_group()
rerun.add_argument('--rerun', '-r',
metavar='<string>',
action=StoreValue,
choices=processopts,
help=('Rerun this stage only and stop. Can be one of: %(choices)s. Default: %(default)s'))
rerun.add_argument('--rerun-all',
action=StoreTrue,
nargs=0,
default=False,
help='Permanently delete all previous results and rerun the processing pipeline.')
rerun.add_argument('--rerun-from',
action=RerunFrom,
metavar='<string>',
choices=processopts,
help=('Rerun processing from this stage. Can be one of: %(choices)s. Default: %(default)s'))
parser.add_argument('--min-num-features',
metavar='<integer>',
action=StoreValue,
default=10000,
type=int,
help=('Minimum number of features to extract per image. '
'More features can be useful for finding more matches between images, '
'potentially allowing the reconstruction of areas with little overlap or insufficient features. '
'More features also slow down processing. Default: %(default)s'))
parser.add_argument('--feature-type',
metavar='<string>',
action=StoreValue,
default='sift',
choices=['akaze', 'hahog', 'orb', 'sift'],
help=('Choose the algorithm for extracting keypoints and computing descriptors. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--feature-quality',
metavar='<string>',
action=StoreValue,
default='high',
choices=['ultra', 'high', 'medium', 'low', 'lowest'],
help=('Set feature extraction quality. Higher quality generates better features, but requires more memory and takes longer. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--matcher-type',
metavar='<string>',
action=StoreValue,
default='flann',
choices=['bow', 'bruteforce', 'flann'],
help=('Matcher algorithm, Fast Library for Approximate Nearest Neighbors or Bag of Words. FLANN is slower, but more stable. BOW is faster, but can sometimes miss valid matches. BRUTEFORCE is very slow but robust.'
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--matcher-neighbors',
metavar='<positive integer>',
action=StoreValue,
default=0,
type=int,
help='Perform image matching with the nearest images based on GPS exif data. Set to 0 to match by triangulation. Default: %(default)s')
parser.add_argument('--use-fixed-camera-params',
action=StoreTrue,
nargs=0,
default=False,
help='Turn off camera parameter optimization during bundle adjustment. This can be sometimes useful for improving results that exhibit doming/bowling or when images are taken with a rolling shutter camera. Default: %(default)s')
parser.add_argument('--cameras',
default='',
metavar='<json>',
action=StoreValue,
type=path_or_json_string,
help='Use the camera parameters computed from '
'another dataset instead of calculating them. '
'Can be specified either as path to a cameras.json file or as a '
'JSON string representing the contents of a '
'cameras.json file. Default: %(default)s')
parser.add_argument('--camera-lens',
metavar='<string>',
action=StoreValue,
default='auto',
choices=['auto', 'perspective', 'brown', 'fisheye', 'spherical', 'equirectangular', 'dual'],
help=('Set a camera projection type. Manually setting a value '
'can help improve geometric undistortion. By default the application '
'tries to determine a lens type from the images metadata. Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--radiometric-calibration',
metavar='<string>',
action=StoreValue,
default='none',
choices=['none', 'camera', 'camera+sun'],
help=('Set the radiometric calibration to perform on images. '
'When processing multispectral and thermal images you should set this option '
'to obtain reflectance/temperature values (otherwise you will get digital number values). '
'[camera] applies black level, vignetting, row gradient gain/exposure compensation (if appropriate EXIF tags are found) and computes absolute temperature values. '
'[camera+sun] is experimental, applies all the corrections of [camera], plus compensates for spectral radiance registered via a downwelling light sensor (DLS) taking in consideration the angle of the sun. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--max-concurrency',
metavar='<positive integer>',
action=StoreValue,
default=context.num_cores,
type=int,
help=('The maximum number of processes to use in various '
'processes. Peak memory requirement is ~1GB per '
'thread and 2 megapixel image resolution. Default: %(default)s'))
parser.add_argument('--depthmap-resolution',
metavar='<positive float>',
action=StoreValue,
type=float,
default=640,
help=('Controls the density of the point cloud by setting the resolution of the depthmap images. Higher values take longer to compute '
'but produce denser point clouds. Overrides the value calculated by --pc-quality.'
'Default: %(default)s'))
parser.add_argument('--use-hybrid-bundle-adjustment',
action=StoreTrue,
nargs=0,
default=False,
help='Run local bundle adjustment for every image added to the reconstruction and a global '
'adjustment every 100 images. Speeds up reconstruction for very large datasets. Default: %(default)s')
parser.add_argument('--sfm-algorithm',
metavar='<string>',
action=StoreValue,
default='incremental',
choices=['incremental', 'triangulation', 'planar'],
help=('Choose the structure from motion algorithm. For aerial datasets, if camera GPS positions and angles are available, triangulation can generate better results. For planar scenes captured at fixed altitude with nadir-only images, planar can be much faster. '
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--use-3dmesh',
action=StoreTrue,
nargs=0,
default=False,
help='Use a full 3D mesh to compute the orthophoto instead of a 2.5D mesh. This option is a bit faster and provides similar results in planar areas. Default: %(default)s')
parser.add_argument('--skip-3dmodel',
action=StoreTrue,
nargs=0,
default=False,
help='Skip generation of a full 3D model. This can save time if you only need 2D results such as orthophotos and DEMs. Default: %(default)s')
parser.add_argument('--skip-report',
action=StoreTrue,
nargs=0,
default=False,
help='Skip generation of PDF report. This can save time if you don\'t need a report. Default: %(default)s')
parser.add_argument('--skip-orthophoto',
action=StoreTrue,
nargs=0,
default=False,
help='Skip generation of the orthophoto. This can save time if you only need 3D results or DEMs. Default: %(default)s')
parser.add_argument('--ignore-gsd',
action=StoreTrue,
nargs=0,
default=False,
help='Ignore Ground Sampling Distance (GSD). GSD '
'caps the maximum resolution of image outputs and '
'resizes images when necessary, resulting in faster processing and '
'lower memory usage. Since GSD is an estimate, sometimes ignoring it can result in slightly better image output quality. Default: %(default)s')
parser.add_argument('--no-gpu',
action=StoreTrue,
nargs=0,
default=False,
help='Do not use GPU acceleration, even if it\'s available. Default: %(default)s')
parser.add_argument('--mesh-size',
metavar='<positive integer>',
action=StoreValue,
default=200000,
type=int,
help=('The maximum vertex count of the output mesh. '
'Default: %(default)s'))
parser.add_argument('--mesh-octree-depth',
metavar='<integer: 1 <= x <= 14>',
action=StoreValue,
default=11,
type=int,
help=('Octree depth used in the mesh reconstruction, '
'increase to get more vertices, recommended '
'values are 8-12. Default: %(default)s'))
parser.add_argument('--fast-orthophoto',
action=StoreTrue,
nargs=0,
default=False,
help='Skips dense reconstruction and 3D model generation. '
'It generates an orthophoto directly from the sparse reconstruction. '
'If you just need an orthophoto and do not need a full 3D model, turn on this option. Default: %(default)s')
parser.add_argument('--crop',
metavar='<positive float>',
action=StoreValue,
default=3,
type=float,
help=('Automatically crop image outputs by creating a smooth buffer '
'around the dataset boundaries, shrunk by N meters. '
'Use 0 to disable cropping. '
'Default: %(default)s'))
parser.add_argument('--boundary',
default='',
metavar='<json>',
action=StoreValue,
type=path_or_json_string,
help='GeoJSON polygon limiting the area of the reconstruction. '
'Can be specified either as path to a GeoJSON file or as a '
'JSON string representing the contents of a '
'GeoJSON file. Default: %(default)s')
parser.add_argument('--auto-boundary',
action=StoreTrue,
nargs=0,
default=False,
help='Automatically set a boundary using camera shot locations to limit the area of the reconstruction. '
'This can help remove far away background artifacts (sky, background landscapes, etc.). See also --boundary. '
'Default: %(default)s')
parser.add_argument('--pc-quality',
metavar='<string>',
action=StoreValue,
default='medium',
choices=['ultra', 'high', 'medium', 'low', 'lowest'],
help=('Set point cloud quality. Higher quality generates better, denser point clouds, but requires more memory and takes longer. Each step up in quality increases processing time roughly by a factor of 4x.'
'Can be one of: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--pc-classify',
action=StoreTrue,
nargs=0,
default=False,
help='Classify the point cloud outputs using a Simple Morphological Filter. '
'You can control the behavior of this option by tweaking the --dem-* parameters. '
'Default: '
'%(default)s')
parser.add_argument('--pc-csv',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in CSV format. Default: %(default)s')
parser.add_argument('--pc-las',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in LAS format. Default: %(default)s')
parser.add_argument('--pc-ept',
action=StoreTrue,
nargs=0,
default=False,
help='Export the georeferenced point cloud in Entwine Point Tile (EPT) format. Default: %(default)s')
parser.add_argument('--pc-copc',
action=StoreTrue,
nargs=0,
default=False,
help='Save the georeferenced point cloud in Cloud Optimized Point Cloud (COPC) format. Default: %(default)s')
parser.add_argument('--pc-filter',
metavar='<positive float>',
action=StoreValue,
type=float,
default=2.5,
help='Filters the point cloud by removing points that deviate more than N standard deviations from the local mean. Set to 0 to disable filtering. '
'Default: %(default)s')
parser.add_argument('--pc-sample',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0,
help='Filters the point cloud by keeping only a single point around a radius N (in meters). This can be useful to limit the output resolution of the point cloud and remove duplicate points. Set to 0 to disable sampling. '
'Default: %(default)s')
parser.add_argument('--pc-tile',
action=StoreTrue,
nargs=0,
default=False,
help='Reduce the memory usage needed for depthmap fusion by splitting large scenes into tiles. Turn this on if your machine doesn\'t have much RAM and/or you\'ve set --pc-quality to high or ultra. Experimental. '
'Default: %(default)s')
parser.add_argument('--pc-geometric',
action=StoreTrue,
nargs=0,
default=False,
help='Improve the accuracy of the point cloud by computing geometrically consistent depthmaps. This increases processing time, but can improve results in urban scenes. '
'Default: %(default)s')
parser.add_argument('--smrf-scalar',
metavar='<positive float>',
action=StoreValue,
type=float,
default=1.25,
help='Simple Morphological Filter elevation scalar parameter. '
'Default: %(default)s')
parser.add_argument('--smrf-slope',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0.15,
help='Simple Morphological Filter slope parameter (rise over run). '
'Default: %(default)s')
parser.add_argument('--smrf-threshold',
metavar='<positive float>',
action=StoreValue,
type=float,
default=0.5,
help='Simple Morphological Filter elevation threshold parameter (meters). '
'Default: %(default)s')
parser.add_argument('--smrf-window',
metavar='<positive float>',
action=StoreValue,
type=float,
default=18.0,
help='Simple Morphological Filter window radius parameter (meters). '
'Default: %(default)s')
parser.add_argument('--texturing-data-term',
metavar='<string>',
action=StoreValue,
default='gmi',
choices=['gmi', 'area'],
help=('When texturing the 3D mesh, for each triangle, choose to prioritize images with sharp features (gmi) or those that cover the largest area (area). Default: %(default)s'))
parser.add_argument('--texturing-outlier-removal-type',
metavar='<string>',
action=StoreValue,
default='gauss_clamping',
choices=['none', 'gauss_clamping', 'gauss_damping'],
help=('Type of photometric outlier removal method. Can be one of: %(choices)s. Default: %(default)s'))
parser.add_argument('--texturing-skip-global-seam-leveling',
action=StoreTrue,
nargs=0,
default=False,
help=('Skip normalization of colors across all images. Useful when processing radiometric data. Default: %(default)s'))
parser.add_argument('--texturing-skip-local-seam-leveling',
action=StoreTrue,
nargs=0,
default=False,
help='Skip the blending of colors near seams. Default: %(default)s')
parser.add_argument('--texturing-keep-unseen-faces',
action=StoreTrue,
nargs=0,
default=False,
help=('Keep faces in the mesh that are not seen in any camera. '
'Default: %(default)s'))
parser.add_argument('--texturing-tone-mapping',
metavar='<string>',
action=StoreValue,
choices=['none', 'gamma'],
default='none',
help='Turn on gamma tone mapping or none for no tone '
'mapping. Can be one of %(choices)s. '
'Default: %(default)s ')
parser.add_argument('--gcp',
metavar='<path string>',
action=StoreValue,
default=None,
help=('Path to the file containing the ground control '
'points used for georeferencing. '
'The file needs to '
'use the following format: \n'
'EPSG:<code> or <+proj definition>\n'
'geo_x geo_y geo_z im_x im_y image_name [gcp_name] [extra1] [extra2]\n'
'Default: %(default)s'))
parser.add_argument('--geo',
metavar='<path string>',
action=StoreValue,
default=None,
help=('Path to the image geolocation file containing the camera center coordinates used for georeferencing. '
'Note that omega/phi/kappa are currently not supported (you can set them to 0). '
'The file needs to '
'use the following format: \n'
'EPSG:<code> or <+proj definition>\n'
'image_name geo_x geo_y geo_z [omega (degrees)] [phi (degrees)] [kappa (degrees)] [horz accuracy (meters)] [vert accuracy (meters)]\n'
'Default: %(default)s'))
parser.add_argument('--use-exif',
action=StoreTrue,
nargs=0,
default=False,
help=('Use this tag if you have a GCP File but '
'want to use the EXIF information for georeferencing instead. Default: %(default)s'))
parser.add_argument('--dtm',
action=StoreTrue,
nargs=0,
default=False,
help='Use this tag to build a DTM (Digital Terrain Model, ground only) using a simple '
'morphological filter. Check the --dem* and --smrf* parameters for finer tuning. Default: %(default)s')
parser.add_argument('--dsm',
action=StoreTrue,
nargs=0,
default=False,
help='Use this tag to build a DSM (Digital Surface Model, ground + objects) using a progressive '
'morphological filter. Check the --dem* parameters for finer tuning. Default: %(default)s')
parser.add_argument('--dem-gapfill-steps',
metavar='<positive integer>',
action=StoreValue,
default=3,
type=int,
help='Number of steps used to fill areas with gaps. Set to 0 to disable gap filling. '
'Starting with a radius equal to the output resolution, N different DEMs are generated with '
'progressively bigger radius using the inverse distance weighted (IDW) algorithm '
'and merged together. Remaining gaps are then merged using nearest neighbor interpolation. '
'Default: %(default)s')
parser.add_argument('--dem-resolution',
metavar='<float>',
action=StoreValue,
type=float,
default=5,
help='DSM/DTM resolution in cm / pixel. Note that this value is capped to 2x the ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also.'
' Default: %(default)s')
parser.add_argument('--dem-decimation',
metavar='<positive integer>',
action=StoreValue,
default=1,
type=int,
help='Decimate the points before generating the DEM. 1 is no decimation (full quality). '
'100 decimates ~99%% of the points. Useful for speeding up generation of DEM results in very large datasets. Default: %(default)s')
parser.add_argument('--dem-euclidean-map',
action=StoreTrue,
nargs=0,
default=False,
help='Computes an euclidean raster map for each DEM. '
'The map reports the distance from each cell to the nearest '
'NODATA value (before any hole filling takes place). '
'This can be useful to isolate the areas that have been filled. '
'Default: '
'%(default)s')
parser.add_argument('--orthophoto-resolution',
metavar='<float > 0.0>',
action=StoreValue,
default=5,
type=float,
help=('Orthophoto resolution in cm / pixel. Note that this value is capped by a ground sampling distance (GSD) estimate. To remove the cap, check --ignore-gsd also. '
'Default: %(default)s'))
parser.add_argument('--orthophoto-no-tiled',
action=StoreTrue,
nargs=0,
default=False,
help='Set this parameter if you want a striped GeoTIFF. '
'Default: %(default)s')
parser.add_argument('--orthophoto-png',
action=StoreTrue,
nargs=0,
default=False,
help='Set this parameter if you want to generate a PNG rendering of the orthophoto. '
'Default: %(default)s')
parser.add_argument('--orthophoto-kmz',
action=StoreTrue,
nargs=0,
default=False,
help='Set this parameter if you want to generate a Google Earth (KMZ) rendering of the orthophoto. '
'Default: %(default)s')
parser.add_argument('--orthophoto-compression',
metavar='<string>',
action=StoreValue,
type=str,
choices=['JPEG', 'LZW', 'PACKBITS', 'DEFLATE', 'LZMA', 'NONE'],
default='DEFLATE',
help='Set the compression to use for orthophotos. Can be one of: %(choices)s. Default: %(default)s')
parser.add_argument('--orthophoto-cutline',
action=StoreTrue,
nargs=0,
default=False,
help='Generates a polygon around the cropping area '
'that cuts the orthophoto around the edges of features. This polygon '
'can be useful for stitching seamless mosaics with multiple overlapping orthophotos. '
'Default: '
'%(default)s')
parser.add_argument('--tiles',
action=StoreTrue,
nargs=0,
default=False,
help='Generate static tiles for orthophotos and DEMs that are '
'suitable for viewers like Leaflet or OpenLayers. '
'Default: %(default)s')
parser.add_argument('--3d-tiles',
action=StoreTrue,
nargs=0,
default=False,
help='Generate OGC 3D Tiles outputs. Default: %(default)s')
parser.add_argument('--rolling-shutter',
action=StoreTrue,
nargs=0,
default=False,
help='Turn on rolling shutter correction. If the camera '
'has a rolling shutter and the images were taken in motion, you can turn on this option '
'to improve the accuracy of the results. See also --rolling-shutter-readout. '
'Default: %(default)s')
parser.add_argument('--rolling-shutter-readout',
type=float,
action=StoreValue,
metavar='<positive integer>',
default=0,
help='Override the rolling shutter readout time for your camera sensor (in milliseconds), instead of using the rolling shutter readout database. '
'Note that not all cameras are present in the database. Set to 0 to use the database value. '
'Default: %(default)s')
parser.add_argument('--build-overviews',
action=StoreTrue,
nargs=0,
default=False,
help='Build orthophoto overviews for faster display in programs such as QGIS. Default: %(default)s')
parser.add_argument('--cog',
action=StoreTrue,
nargs=0,
default=False,
help='Create Cloud-Optimized GeoTIFFs instead of normal GeoTIFFs. Default: %(default)s')
parser.add_argument('--verbose', '-v',
action=StoreTrue,
nargs=0,
default=False,
help='Print additional messages to the console. '
'Default: %(default)s')
parser.add_argument('--copy-to',
metavar='<path>',
action=StoreValue,
help='Copy output results to this folder after processing.')
parser.add_argument('--time',
action=StoreTrue,
nargs=0,
default=False,
help='Generates a benchmark file with runtime info. '
'Default: %(default)s')
parser.add_argument('--debug',
action=StoreTrue,
nargs=0,
default=False,
help='Print debug messages. Default: %(default)s')
parser.add_argument('--version',
action='version',
version='ODM {0}'.format(__version__),
help='Displays version number and exits. ')
parser.add_argument('--split',
type=int,
action=StoreValue,
default=999999,
metavar='<positive integer>',
help='Average number of images per submodel. When '
'splitting a large dataset into smaller '
'submodels, images are grouped into clusters. '
'This value regulates the number of images that '
'each cluster should have on average. Default: %(default)s')
parser.add_argument('--split-overlap',
type=float,
action=StoreValue,
metavar='<positive integer>',
default=150,
help='Radius of the overlap between submodels. '
'After grouping images into clusters, images '
'that are closer than this radius to a cluster '
'are added to the cluster. This is done to ensure '
'that neighboring submodels overlap. Default: %(default)s')
parser.add_argument('--split-image-groups',
metavar='<path string>',
action=StoreValue,
default=None,
help=('Path to the image groups file that controls how images should be split into groups. '
'The file needs to use the following format: \n'
'image_name group_name\n'
'Default: %(default)s'))
# parser.add_argument('--split-multitracks',
# action=StoreTrue,
# nargs=0,
# default=False,
# help='Split multi-track reconstructions.')
parser.add_argument('--sm-cluster',
metavar='<string>',
action=StoreValue,
type=url_string,
default=None,
help='URL to a ClusterODM instance '
'for distributing a split-merge workflow on '
'multiple nodes in parallel. '
'Default: %(default)s')
parser.add_argument('--merge',
metavar='<string>',
action=StoreValue,
default='all',
choices=['all', 'pointcloud', 'orthophoto', 'dem'],
help=('Choose what to merge in the merge step in a split dataset. '
'By default all available outputs are merged. '
'Options: %(choices)s. Default: '
'%(default)s'))
parser.add_argument('--force-gps',
action=StoreTrue,
nargs=0,
default=False,
help=('Use images\' GPS exif data for reconstruction, even if there are GCPs present.'
'This flag is useful if you have high precision GPS measurements. '
'If there are no GCPs, this flag does nothing. Default: %(default)s'))
parser.add_argument('--gps-accuracy',
type=float,
action=StoreValue,
metavar='<positive float>',
default=10,
help='Set a value in meters for the GPS Dilution of Precision (DOP) '
'information for all images. If your images are tagged '
'with high precision GPS information (RTK), this value will be automatically '
'set accordingly. You can use this option to manually set it in case the reconstruction '
'fails. Lowering this option can sometimes help control bowling-effects over large areas. Default: %(default)s')
parser.add_argument('--optimize-disk-space',
action=StoreTrue,
nargs=0,
default=False,
help=('Delete heavy intermediate files to optimize disk space usage. This '
'affects the ability to restart the pipeline from an intermediate stage, '
'but allows datasets to be processed on machines that don\'t have sufficient '
'disk space available. Default: %(default)s'))
parser.add_argument('--pc-rectify',
action=StoreTrue,
nargs=0,
default=False,
help=('Perform ground rectification on the point cloud. This means that wrongly classified ground '
'points will be re-classified and gaps will be filled. Useful for generating DTMs. '
'Default: %(default)s'))
parser.add_argument('--primary-band',
metavar='<string>',
action=StoreValue,
default="auto",
type=str,
help=('When processing multispectral datasets, you can specify the name of the primary band that will be used for reconstruction. '
'It\'s recommended to choose a band which has sharp details and is in focus. '
'Default: %(default)s'))
parser.add_argument('--skip-band-alignment',
action=StoreTrue,
nargs=0,
default=False,
help=('When processing multispectral datasets, ODM will automatically align the images for each band. '
'If the images have been postprocessed and are already aligned, use this option. '
'Default: %(default)s'))
args = parser.parse_args(argv)
# check that the project path setting has been set properly
if not args.project_path:
log.ODM_ERROR('You need to set the project path in the '
'settings.yaml file before you can run ODM, '
'or use `--project-path <path>`. Run `python3 '
'run.py --help` for more information. ')
sys.exit(1)
if args.fast_orthophoto:
log.ODM_INFO('Fast orthophoto is turned on, automatically setting --skip-3dmodel')
args.skip_3dmodel = True
if args.pc_rectify and not args.pc_classify:
log.ODM_INFO("Ground rectify is turned on, automatically turning on point cloud classification")
args.pc_classify = True
if args.dtm and not args.pc_classify:
log.ODM_INFO("DTM is turned on, automatically turning on point cloud classification")
args.pc_classify = True
if args.skip_3dmodel and args.use_3dmesh:
log.ODM_WARNING('--skip-3dmodel is set, but so is --use-3dmesh. --skip-3dmodel will be ignored.')
args.skip_3dmodel = False
if args.orthophoto_cutline and not args.crop:
log.ODM_WARNING("--orthophoto-cutline is set, but --crop is not. --crop will be set to 0.01")
args.crop = 0.01
if args.sm_cluster:
try:
Node.from_url(args.sm_cluster).info()
except exceptions.NodeConnectionError as e:
log.ODM_ERROR("Cluster node seems to be offline: %s" % str(e))
sys.exit(1)
return args

View File

@ -0,0 +1,51 @@
import os
import sys
import multiprocessing

# Define some needed locations
# current_path: the directory containing this module; root_path: its parent
# (the ODM root, which also holds the SuperBuild tree and settings.yaml)
current_path = os.path.abspath(os.path.dirname(__file__))
root_path, _ = os.path.split(current_path)
superbuild_path = os.path.join(root_path, 'SuperBuild')
superbuild_bin_path = os.path.join(superbuild_path, 'install', 'bin')

# add opencv,opensfm to python path
# Several candidate dist-packages locations are appended; only the ones that
# exist for the interpreter actually in use will resolve imports.
python_packages_paths = [os.path.join(superbuild_path, p) for p in [
    'install/lib/python3.9/dist-packages',
    'install/lib/python3.8/dist-packages',
    'install/lib/python3/dist-packages',
    'install/bin/opensfm'
]]
for p in python_packages_paths:
    sys.path.append(p)

# define opensfm path
opensfm_path = os.path.join(superbuild_bin_path, "opensfm")

# Paths to SuperBuild-installed meshing executables
poisson_recon_path = os.path.join(superbuild_bin_path, 'PoissonRecon')
dem2mesh_path = os.path.join(superbuild_bin_path, 'dem2mesh')
dem2points_path = os.path.join(superbuild_bin_path, 'dem2points')

# define mvstex path
mvstex_path = os.path.join(superbuild_bin_path, "texrecon")

# openmvs paths
omvs_densify_path = os.path.join(superbuild_bin_path, "OpenMVS", "DensifyPointCloud")
omvs_reconstructmesh_path = os.path.join(superbuild_bin_path, "OpenMVS", "ReconstructMesh")

# Point cloud filtering executable
fpcfilter_path = os.path.join(superbuild_bin_path, "FPCFilter")

# Orthophoto generation executable
odm_orthophoto_path = os.path.join(superbuild_bin_path, "odm_orthophoto")

# Default settings file at the ODM root
settings_path = os.path.join(root_path, 'settings.yaml')

# Define supported image extensions
supported_extensions = {'.jpg','.jpeg','.png', '.tif', '.tiff', '.bmp'}

# Define the number of cores
num_cores = multiprocessing.cpu_count()

# Print python paths if invoked as a script
# (intended usage: eval the output to extend PYTHONPATH in a shell)
if __name__ == "__main__":
    print("export PYTHONPATH=" + ":".join(python_packages_paths))

View File

@ -0,0 +1,269 @@
from opendm import context
from opendm.system import run
from opendm import log
from opendm.point_cloud import export_summary_json
from osgeo import ogr
import json, os
from opendm.concurrency import get_max_memory
from opendm.utils import double_quote
class Cropper:
    """Compute crop boundaries from a point cloud and crop rasters to them.

    A Cropper derives a smooth, buffered polygon around a point cloud's data
    extents (via PDAL), saves it as GeoJSON/GeoPackage, and can crop GeoTIFFs
    to such a polygon using gdalwarp. All generated files are placed in
    storage_dir and named "<files_prefix>.<suffix>".
    """

    def __init__(self, storage_dir, files_prefix = "crop"):
        # storage_dir: directory where all intermediate/output files are written
        self.storage_dir = storage_dir
        # files_prefix: filename prefix for every file this instance creates
        self.files_prefix = files_prefix

    def path(self, suffix):
        """
        @return a path relative to storage_dir and prefixed with files_prefix
        """
        return os.path.join(self.storage_dir, '{}.{}'.format(self.files_prefix, suffix))

    @staticmethod
    def crop(gpkg_path, geotiff_path, gdal_options, keep_original=True, warp_options=None):
        """Crop geotiff_path in place using the cutline polygon in gpkg_path.

        :param gpkg_path: GeoPackage containing the crop polygon
        :param geotiff_path: GeoTIFF to crop (replaced in place)
        :param gdal_options: dict of GDAL creation options (emitted as -co KEY=VALUE)
        :param keep_original: keep a "<name>.original<ext>" copy of the input
        :param warp_options: optional list of extra gdalwarp arguments
        :return: geotiff_path (cropped on success; restored untouched on failure)
        """
        # Avoid a mutable default argument: a shared [] default would be the
        # same list object across all calls. None is the safe sentinel.
        if warp_options is None:
            warp_options = []

        if not os.path.exists(gpkg_path) or not os.path.exists(geotiff_path):
            log.ODM_WARNING("Either {} or {} does not exist, will skip cropping.".format(gpkg_path, geotiff_path))
            return geotiff_path

        log.ODM_INFO("Cropping %s" % geotiff_path)

        # Rename original file
        # path/to/odm_orthophoto.tif --> path/to/odm_orthophoto.original.tif
        path, filename = os.path.split(geotiff_path)
        # path = path/to
        # filename = odm_orthophoto.tif
        basename, ext = os.path.splitext(filename)
        # basename = odm_orthophoto
        # ext = .tif
        original_geotiff = os.path.join(path, "{}.original{}".format(basename, ext))
        os.replace(geotiff_path, original_geotiff)

        try:
            kwargs = {
                'gpkg_path': double_quote(gpkg_path),
                'geotiffInput': double_quote(original_geotiff),
                'geotiffOutput': double_quote(geotiff_path),
                'options': ' '.join(map(lambda k: '-co {}={}'.format(k, gdal_options[k]), gdal_options)),
                'warpOptions': ' '.join(warp_options),
                'max_memory': get_max_memory()
            }

            run('gdalwarp -cutline {gpkg_path} '
                '-crop_to_cutline '
                '{options} '
                '{warpOptions} '
                '{geotiffInput} '
                '{geotiffOutput} '
                '--config GDAL_CACHEMAX {max_memory}%'.format(**kwargs))

            if not keep_original:
                os.remove(original_geotiff)

        except Exception as e:
            log.ODM_WARNING('Something went wrong while cropping: {}'.format(e))

            # Revert rename so the caller still has a usable raster
            os.replace(original_geotiff, geotiff_path)

        return geotiff_path

    @staticmethod
    def merge_bounds(input_bound_files, output_bounds, buffer_distance = 0):
        """
        Merge multiple bound files into a single bound computed from the convex hull
        of all bounds (minus a buffer distance in meters)
        """
        geomcol = ogr.Geometry(ogr.wkbGeometryCollection)

        driver = ogr.GetDriverByName('GPKG')
        srs = None

        for input_bound_file in input_bound_files:
            ds = driver.Open(input_bound_file, 0) # read-only
            layer = ds.GetLayer()
            # The SRS of the last input file wins; all inputs are assumed to
            # share the same SRS
            srs = layer.GetSpatialRef()

            # Collect all Geometry
            for feature in layer:
                geomcol.AddGeometry(feature.GetGeometryRef())

            ds = None

        # Calculate convex hull
        convexhull = geomcol.ConvexHull()

        # If buffer distance is specified
        # Create two buffers, one shrunk by
        # N + 3 and then that buffer expanded by 3
        # so that we get smooth corners. \m/
        BUFFER_SMOOTH_DISTANCE = 3

        if buffer_distance > 0:
            convexhull = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
            convexhull = convexhull.Buffer(BUFFER_SMOOTH_DISTANCE)

        # Save to a new file
        if os.path.exists(output_bounds):
            driver.DeleteDataSource(output_bounds)

        out_ds = driver.CreateDataSource(output_bounds)
        layer = out_ds.CreateLayer("convexhull", srs=srs, geom_type=ogr.wkbPolygon)

        feature_def = layer.GetLayerDefn()
        feature = ogr.Feature(feature_def)
        feature.SetGeometry(convexhull)
        layer.CreateFeature(feature)
        feature = None

        # Save and close output data source
        out_ds = None

    def create_bounds_geojson(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
        """
        Compute a buffered polygon around the data extents (not just a bounding box)
        of the given point cloud.

        @return filename to GeoJSON containing the polygon
        """
        if not os.path.exists(pointcloud_path):
            log.ODM_WARNING('Point cloud does not exist, cannot generate bounds {}'.format(pointcloud_path))
            return ''

        # Do decimation prior to extracting boundary information
        decimated_pointcloud_path = self.path('decimated.las')

        run("pdal translate -i \"{}\" "
            "-o \"{}\" "
            "decimation "
            "--filters.decimation.step={} ".format(pointcloud_path, decimated_pointcloud_path, decimation_step))

        if not os.path.exists(decimated_pointcloud_path):
            log.ODM_WARNING('Could not decimate point cloud, thus cannot generate GPKG bounds {}'.format(decimated_pointcloud_path))
            return ''

        # Use PDAL to dump boundary information
        # then read the information back
        boundary_file_path = self.path('boundary.json')

        run('pdal info --boundary --filters.hexbin.edge_size=1 --filters.hexbin.threshold=0 "{0}" > "{1}"'.format(decimated_pointcloud_path, boundary_file_path))

        pc_geojson_boundary_feature = None

        with open(boundary_file_path, 'r') as f:
            json_f = json.loads(f.read())
            pc_geojson_boundary_feature = json_f['boundary']['boundary_json']

        if pc_geojson_boundary_feature is None: raise RuntimeError("Could not determine point cloud boundaries")

        # Write bounds to GeoJSON
        tmp_bounds_geojson_path = self.path('tmp-bounds.geojson')
        with open(tmp_bounds_geojson_path, "w") as f:
            f.write(json.dumps({
                "type": "FeatureCollection",
                "features": [{
                    "type": "Feature",
                    "geometry": pc_geojson_boundary_feature
                }]
            }))

        # Create a convex hull around the boundary
        # as to encompass the entire area (no holes)
        driver = ogr.GetDriverByName('GeoJSON')
        ds = driver.Open(tmp_bounds_geojson_path, 0) # read-only
        layer = ds.GetLayer()

        # Collect all Geometry
        geomcol = ogr.Geometry(ogr.wkbGeometryCollection)
        for feature in layer:
            geomcol.AddGeometry(feature.GetGeometryRef())

        # Calculate convex hull
        convexhull = geomcol.ConvexHull()

        # If buffer distance is specified
        # Create two buffers, one shrunk by
        # N + 3 and then that buffer expanded by 3
        # so that we get smooth corners. \m/
        BUFFER_SMOOTH_DISTANCE = 3

        if buffer_distance > 0:
            # For small areas, check that buffering doesn't obliterate
            # our hull
            tmp = convexhull.Buffer(-(buffer_distance + BUFFER_SMOOTH_DISTANCE))
            tmp = tmp.Buffer(BUFFER_SMOOTH_DISTANCE)
            if tmp.Area() > 0:
                convexhull = tmp
            else:
                log.ODM_WARNING("Very small crop area detected, we will not smooth it.")

        # Save to a new file
        bounds_geojson_path = self.path('bounds.geojson')
        if os.path.exists(bounds_geojson_path):
            os.remove(bounds_geojson_path)

        out_ds = driver.CreateDataSource(bounds_geojson_path)
        layer = out_ds.CreateLayer("convexhull", geom_type=ogr.wkbPolygon)

        feature_def = layer.GetLayerDefn()
        feature = ogr.Feature(feature_def)
        feature.SetGeometry(convexhull)
        layer.CreateFeature(feature)
        feature = None

        # Save and close data sources
        out_ds = ds = None

        # Remove decimated point cloud
        if os.path.exists(decimated_pointcloud_path):
            os.remove(decimated_pointcloud_path)

        # Remove tmp bounds
        if os.path.exists(tmp_bounds_geojson_path):
            os.remove(tmp_bounds_geojson_path)

        return bounds_geojson_path

    def create_bounds_gpkg(self, pointcloud_path, buffer_distance = 0, decimation_step=40):
        """
        Compute a buffered polygon around the data extents (not just a bounding box)
        of the given point cloud.

        @return filename to Geopackage containing the polygon
        """
        if not os.path.exists(pointcloud_path):
            log.ODM_WARNING('Point cloud does not exist, cannot generate GPKG bounds {}'.format(pointcloud_path))
            return ''

        bounds_geojson_path = self.create_bounds_geojson(pointcloud_path, buffer_distance, decimation_step)

        summary_file_path = os.path.join(self.storage_dir, '{}.summary.json'.format(self.files_prefix))
        export_summary_json(pointcloud_path, summary_file_path)

        pc_proj4 = None
        with open(summary_file_path, 'r') as f:
            json_f = json.loads(f.read())
            pc_proj4 = json_f['summary']['srs']['proj4']

        if pc_proj4 is None: raise RuntimeError("Could not determine point cloud proj4 declaration")

        bounds_gpkg_path = os.path.join(self.storage_dir, '{}.bounds.gpkg'.format(self.files_prefix))

        if os.path.isfile(bounds_gpkg_path):
            os.remove(bounds_gpkg_path)

        # Convert bounds to GPKG
        kwargs = {
            'input': double_quote(bounds_geojson_path),
            'output': double_quote(bounds_gpkg_path),
            'proj4': pc_proj4
        }

        run('ogr2ogr -overwrite -f GPKG -a_srs "{proj4}" {output} {input}'.format(**kwargs))

        return bounds_gpkg_path

View File

@ -0,0 +1,188 @@
import os
import shutil
import rasterio
import fiona
import numpy as np
import math
import sys
from opendm import log
from opendm import io
from opendm import concurrency
from opendm import get_image_size
from opendm import system
from skimage.feature import canny
from skimage.draw import line
from skimage.graph import route_through_array
import shapely
from shapely.geometry import LineString, mapping, shape
from shapely.ops import polygonize, unary_union
if sys.platform == 'win32':
    # Temporary fix for: ValueError: GEOSGeom_createLinearRing_r returned a NULL pointer
    # https://github.com/Toblerity/Shapely/issues/1005
    # Disabling the C speedups trades performance for correctness on Windows only.
    shapely.speedups.disable()
def write_raster(data, file):
    """Write a 2D array to *file* as a single-band float32 GeoTIFF.

    No transform, nodata value, or CRS is attached; the raster is a bare
    pixel grid. BIGTIFF is enabled automatically when the output may
    exceed the classic TIFF size limit.
    """
    rows, cols = data.shape[0], data.shape[1]
    raster_profile = dict(
        driver='GTiff',
        width=cols,
        height=rows,
        count=1,
        dtype='float32',
        transform=None,
        nodata=None,
        crs=None,
    )

    with rasterio.open(file, 'w', BIGTIFF="IF_SAFER", **raster_profile) as sink:
        sink.write(data, 1)
def compute_cutline(orthophoto_file, crop_area_file, destination, max_concurrency=1, scale=1):
    """Compute a cutline polygon for an orthophoto and write it to *destination* (GPKG).

    The cutline follows low-contrast paths through the image: canny edges become
    low-cost cells, straight guide lines become high-cost "lava" barriers, and
    least-cost routes are traced across the raster in both directions. The routes
    are polygonized and the largest polygon inside the crop area is kept.

    :param orthophoto_file: path to the orthophoto raster
    :param crop_area_file: path to a vector file containing the crop polygon
    :param destination: output GPKG path (removed first if it already exists)
    :param max_concurrency: thread count passed to GDAL
    :param scale: downscale factor (clamped to (0.0001, 1]) applied to the
        orthophoto before computing the cutline, for speed
    """
    if io.file_exists(orthophoto_file) and io.file_exists(crop_area_file):
        log.ODM_INFO("Computing cutline")

        scale = max(0.0001, min(1, scale))
        scaled_orthophoto = None
        if scale < 1:
            log.ODM_INFO("Scaling orthophoto to %s%% to compute cutline" % (scale * 100))

            scaled_orthophoto = io.related_file_path(orthophoto_file, postfix=".scaled")
            # Scale orthophoto before computing cutline
            system.run("gdal_translate -outsize {}% 0 "
                "-co NUM_THREADS={} "
                "--config GDAL_CACHEMAX {}% "
                '"{}" "{}"'.format(
                scale * 100,
                max_concurrency,
                concurrency.get_max_memory(),
                orthophoto_file,
                scaled_orthophoto
            ))

            # All subsequent work (and the final cleanup) uses the scaled copy
            orthophoto_file = scaled_orthophoto

        # open raster
        f = rasterio.open(orthophoto_file)
        rast = f.read(1) # First band only
        height, width = rast.shape
        # One guide line roughly every 256px, but at least 8 across the image
        number_lines = int(max(8, math.ceil(min(width, height) / 256.0)))
        line_hor_offset = int(width / number_lines)
        line_ver_offset = int(height / number_lines)

        if line_hor_offset <= 2 or line_ver_offset <= 2:
            log.ODM_WARNING("Cannot compute cutline, orthophoto is too small (%sx%spx)" % (width, height))
            return

        crop_f = fiona.open(crop_area_file, 'r')
        if len(crop_f) == 0:
            log.ODM_WARNING("Crop area is empty, cannot compute cutline")
            return
        # NOTE(review): feature id 1 — presumably the first (and only) feature of
        # a GPKG crop file, whose fids start at 1. Confirm for other vector formats.
        crop_poly = shape(crop_f[1]['geometry'])
        crop_f.close()

        linestrings = []

        # Compute canny edges on first band
        edges = canny(rast)

        def compute_linestrings(direction):
            # Trace least-cost routes across the raster in the given direction
            # ('vertical' or anything else = horizontal); appends to linestrings.
            log.ODM_INFO("Computing %s cutlines" % direction)
            # Initialize cost map
            cost_map = np.full((height, width), 1, dtype=np.float32)

            # Write edges to cost map
            cost_map[edges==True] = 0 # Low cost

            # Write "barrier, floor is lava" costs
            if direction == 'vertical':
                lines = [((i, 0), (i, height - 1)) for i in range(line_hor_offset, width - line_hor_offset, line_hor_offset)]
                points = []
                pad_x = int(line_hor_offset / 2.0)
                # Route endpoints sit halfway between consecutive barrier lines
                for i in range(0, len(lines)):
                    a,b = lines[i]
                    points.append(((a[0] - pad_x , a[1]), (b[0] - pad_x, b[1])))
                a,b = lines[-1]
                points.append(((a[0] + pad_x , a[1]), (b[0] + pad_x, b[1])))
            else:
                lines = [((0, j), (width - 1, j)) for j in range(line_ver_offset, height - line_ver_offset, line_ver_offset)]
                points = []
                pad_y = int(line_ver_offset / 2.0)
                for i in range(0, len(lines)):
                    a,b = lines[i]
                    points.append(((a[0] , a[1] - pad_y), (b[0], b[1] - pad_y)))
                a,b = lines[-1]
                points.append(((a[0] , a[1] + pad_y), (b[0], b[1] + pad_y)))

            for a, b in lines:
                # a/b are (x, y); cost_map is indexed [row, col] = [y, x],
                # hence the swapped cc/rr below
                rr,cc = line(*a, *b)
                cost_map[cc, rr] = 9999 # Lava

            # Calculate route
            for a, b in points:
                line_coords, cost = route_through_array(cost_map, (a[1], a[0]), (b[1], b[0]), fully_connected=True, geometric=True)

                # Convert to geographic
                geo_line_coords = [f.xy(*c) for c in line_coords]

                # Simplify
                ls = LineString(geo_line_coords)
                linestrings.append(ls.simplify(0.05, preserve_topology=False))

        compute_linestrings('vertical')
        compute_linestrings('horizontal')

        # Generate polygons and keep only those inside the crop area
        log.ODM_INFO("Generating polygons... this could take a bit.")
        polygons = []
        for p in polygonize(unary_union(linestrings)):
            if crop_poly.contains(p):
                polygons.append(p)

        # This should never happen
        if len(polygons) == 0:
            log.ODM_WARNING("No polygons, cannot compute cutline")
            return

        log.ODM_INFO("Merging polygons")
        cutline_polygons = unary_union(polygons)
        if not hasattr(cutline_polygons, '__getitem__'):
            # unary_union returned a single Polygon rather than a collection
            cutline_polygons = [cutline_polygons]

        # Keep only the largest polygon as the cutline
        largest_cutline = cutline_polygons[0]
        max_area = largest_cutline.area
        for p in cutline_polygons:
            if p.area > max_area:
                max_area = p.area
                largest_cutline = p

        log.ODM_INFO("Largest cutline found: %s m^2" % max_area)

        meta = {
            'crs': {'init': str(f.crs).lower() },
            'driver': 'GPKG',
            'schema': {
                'properties': {},
                'geometry': 'Polygon'
            }
        }

        # Remove previous
        if os.path.exists(destination):
            os.remove(destination)

        with fiona.open(destination, 'w', **meta) as sink:
            sink.write({
                'geometry': mapping(largest_cutline),
                'properties': {}
            })
        f.close()
        log.ODM_INFO("Wrote %s" % destination)

        # Cleanup
        if scaled_orthophoto is not None and os.path.exists(scaled_orthophoto):
            os.remove(scaled_orthophoto)
    else:
        log.ODM_WARNING("We've been asked to compute cutline, but either %s or %s is missing. Skipping..." % (orthophoto_file, crop_area_file))

View File

View File

@ -0,0 +1,354 @@
import os
import sys
import rasterio
import numpy
import math
import time
import shutil
from opendm.system import run
from opendm import point_cloud
from opendm import io
from opendm import system
from opendm.concurrency import get_max_memory, parallel_map
from scipy import ndimage
from datetime import datetime
from opendm.vendor.gdal_fillnodata import main as gdal_fillnodata
from opendm import log
try:
import Queue as queue
except:
import queue
import threading
from .ground_rectification.rectify import run_rectification
from . import pdal
try:
    # GDAL >= 3.3
    from osgeo_utils.gdal_proximity import main as gdal_proximity
except ModuleNotFoundError:
    # GDAL <= 3.2
    try:
        from osgeo.utils.gdal_proximity import main as gdal_proximity
    except ImportError:
        # Bind the name so callers can test availability with
        # "gdal_proximity is not None" instead of hitting a NameError
        # (the previous bare "except: pass" left the name undefined).
        gdal_proximity = None
def classify(lasFile, scalar, slope, threshold, window, verbose=False):
    """Classify ground points in *lasFile* in place using PDAL's SMRF filter.

    Failures are logged as warnings (best-effort); the path is returned either way.

    :param lasFile: point cloud file to classify (also the output path)
    :param scalar, slope, threshold, window: SMRF filter parameters
    :param verbose: passed through to the PDAL runner
    :return: lasFile
    """
    start = datetime.now()

    try:
        pdal.run_pdaltranslate_smrf(lasFile, lasFile, scalar, slope, threshold, window, verbose)
    except Exception as e:
        # Previously a bare "except:" — that also swallowed KeyboardInterrupt/
        # SystemExit and hid the failure reason. Keep the best-effort behavior
        # but only for real errors, and include the cause in the warning.
        log.ODM_WARNING("Error creating classified file %s: %s" % (lasFile, str(e)))

    log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
    return lasFile
def rectify(lasFile, debug=False, reclassify_threshold=5, min_area=750, min_points=500):
    """Rectify the ground classification of a LAZ file in place.

    Converts LAZ -> temporary LAS with PDAL, runs run_rectification() on it,
    then converts back over the original file.

    :param lasFile: input/output LAZ file path
    :param debug: passed through to run_rectification
    :param reclassify_threshold, min_area, min_points: rectification parameters
    :raises Exception: wrapping any failure, with the file path and cause
    :return: lasFile
    """
    start = datetime.now()

    try:
        # Currently, no Python 2 lib that supports reading and writing LAZ, so we will do it manually until ODM is migrated to Python 3
        # When migration is done, we can move to pylas and avoid using PDAL for conversion
        tempLasFile = os.path.join(os.path.dirname(lasFile), 'tmp.las')

        # Convert LAZ to LAS
        # NOTE(review): paths are interpolated unquoted into a shell command;
        # paths containing spaces would break — confirm upstream guarantees.
        cmd = [
            'pdal',
            'translate',
            '-i %s' % lasFile,
            '-o %s' % tempLasFile
        ]
        system.run(' '.join(cmd))

        log.ODM_INFO("Rectifying {} using with [reclassify threshold: {}, min area: {}, min points: {}]".format(lasFile, reclassify_threshold, min_area, min_points))
        run_rectification(
            input=tempLasFile, output=tempLasFile, debug=debug, \
            reclassify_plan='median', reclassify_threshold=reclassify_threshold, \
            extend_plan='surrounding', extend_grid_distance=5, \
            min_area=min_area, min_points=min_points)

        # Convert LAS to LAZ
        cmd = [
            'pdal',
            'translate',
            '-i %s' % tempLasFile,
            '-o %s' % lasFile
        ]
        system.run(' '.join(cmd))
        os.remove(tempLasFile)
    except Exception as e:
        raise Exception("Error rectifying ground in file %s: %s" % (lasFile, str(e)))

    log.ODM_INFO('Created %s in %s' % (lasFile, datetime.now() - start))
    return lasFile
# Module-level slot reset by create_dem before dispatching tile workers
# (declared "global error" there) — presumably used to surface worker
# failures; TODO confirm against parallel_map's implementation.
error = None

def create_dem(input_point_cloud, dem_type, output_type='max', radiuses=['0.56'], gapfill=True,
                outdir='', resolution=0.1, max_workers=1, max_tile_size=4096,
                verbose=False, decimation=None, keep_unfilled_copy=False,
                apply_smoothing=True):
    """ Create DEM from multiple radii, and optionally gapfill

    Rasterizes *input_point_cloud* into <outdir>/<dem_type>.tif by splitting the
    extent into tiles (one per radius per tile position), rendering each tile
    through a PDAL pipeline, mosaicking them with GDAL, optionally gap-filling
    and median-smoothing the result.

    :param input_point_cloud: point cloud file to rasterize
    :param dem_type: name used for output files; 'dtm' additionally applies a
        ground-classification (class 2) filter to the pipeline
    :param output_type: PDAL output statistic (e.g. 'max')
    :param radiuses: list of radius strings; one set of tiles per radius.
        (Mutable default is safe here: the list is only rebound, never mutated.)
    :param gapfill: fill nodata holes using a downscaled, gdal_fillnodata'd copy
    :param outdir: output directory (created if missing)
    :param resolution: meters per pixel (may be enlarged by the RES_FLOOR clamp)
    :param max_workers: parallel tile workers / GDAL threads
    :param max_tile_size: max tile dimension in pixels, drives the tile split
    :param verbose: passed to the PDAL pipeline runner
    :param keep_unfilled_copy: keep the pre-gapfill raster as "<output>.unfilled"
    :param apply_smoothing: run median_smoothing() on the final raster
    """
    global error
    error = None

    start = datetime.now()

    if not os.path.exists(outdir):
        log.ODM_INFO("Creating %s" % outdir)
        os.mkdir(outdir)

    extent = point_cloud.get_extent(input_point_cloud)
    log.ODM_INFO("Point cloud bounds are [minx: %s, maxx: %s] [miny: %s, maxy: %s]" % (extent['minx'], extent['maxx'], extent['miny'], extent['maxy']))
    ext_width = extent['maxx'] - extent['minx']
    ext_height = extent['maxy'] - extent['miny']

    w, h = (int(math.ceil(ext_width / float(resolution))),
            int(math.ceil(ext_height / float(resolution))))

    # Set a floor, no matter the resolution parameter
    # (sometimes a wrongly estimated scale of the model can cause the resolution
    # to be set unrealistically low, causing errors)
    RES_FLOOR = 64
    if w < RES_FLOOR and h < RES_FLOOR:
        prev_w, prev_h = w, h

        if w >= h:
            w, h = (RES_FLOOR, int(math.ceil(ext_height / ext_width * RES_FLOOR)))
        else:
            w, h = (int(math.ceil(ext_width / ext_height * RES_FLOOR)), RES_FLOOR)

        # Scale resolution and radii by the same factor so geometry stays consistent
        floor_ratio = prev_w / float(w)
        resolution *= floor_ratio
        radiuses = [str(float(r) * floor_ratio) for r in radiuses]

        log.ODM_WARNING("Really low resolution DEM requested %s will set floor at %s pixels. Resolution changed to %s. The scale of this reconstruction might be off." % ((prev_w, prev_h), RES_FLOOR, resolution))

    final_dem_pixels = w * h

    # Split into 2^k x 2^k tiles so each tile is at most max_tile_size pixels per side
    num_splits = int(max(1, math.ceil(math.log(math.ceil(final_dem_pixels / float(max_tile_size * max_tile_size)))/math.log(2))))
    num_tiles = num_splits * num_splits
    log.ODM_INFO("DEM resolution is %s, max tile size is %s, will split DEM generation into %s tiles" % ((h, w), max_tile_size, num_tiles))

    tile_bounds_width = ext_width / float(num_splits)
    tile_bounds_height = ext_height / float(num_splits)

    tiles = []

    for r in radiuses:
        minx = extent['minx']

        for x in range(num_splits):
            miny = extent['miny']
            if x == num_splits - 1:
                # Last column/row absorbs rounding error so tiles cover the full extent
                maxx = extent['maxx']
            else:
                maxx = minx + tile_bounds_width

            for y in range(num_splits):
                if y == num_splits - 1:
                    maxy = extent['maxy']
                else:
                    maxy = miny + tile_bounds_height

                filename = os.path.join(os.path.abspath(outdir), '%s_r%s_x%s_y%s.tif' % (dem_type, r, x, y))

                tiles.append({
                    'radius': r,
                    'bounds': {
                        'minx': minx,
                        'maxx': maxx,
                        'miny': miny,
                        'maxy': maxy
                    },
                    'filename': filename
                })

                miny = maxy
            minx = maxx

    # Sort tiles by increasing radius
    # NOTE(review): reverse=True actually sorts by *decreasing* radius — the
    # comment and the key disagree; the ordering matters for the later
    # gdalbuildvrt mosaic (later files win). Confirm intended order.
    tiles.sort(key=lambda t: float(t['radius']), reverse=True)

    def process_tile(q):
        # Render a single tile dict (radius/bounds/filename) via a PDAL pipeline
        log.ODM_INFO("Generating %s (%s, radius: %s, resolution: %s)" % (q['filename'], output_type, q['radius'], resolution))

        d = pdal.json_gdal_base(q['filename'], output_type, q['radius'], resolution, q['bounds'])

        if dem_type == 'dtm':
            d = pdal.json_add_classification_filter(d, 2)

        if decimation is not None:
            d = pdal.json_add_decimation_filter(d, decimation)

        pdal.json_add_readers(d, [input_point_cloud])
        pdal.run_pipeline(d, verbose=verbose)

    parallel_map(process_tile, tiles, max_workers)

    output_file = "%s.tif" % dem_type
    output_path = os.path.abspath(os.path.join(outdir, output_file))

    # Verify tile results
    for t in tiles:
        if not os.path.exists(t['filename']):
            raise Exception("Error creating %s, %s failed to be created" % (output_file, t['filename']))

    # Create virtual raster
    tiles_vrt_path = os.path.abspath(os.path.join(outdir, "tiles.vrt"))
    tiles_file_list = os.path.abspath(os.path.join(outdir, "tiles_list.txt"))
    with open(tiles_file_list, 'w') as f:
        for t in tiles:
            f.write(t['filename'] + '\n')

    run('gdalbuildvrt -input_file_list "%s" "%s" ' % (tiles_file_list, tiles_vrt_path))

    merged_vrt_path = os.path.abspath(os.path.join(outdir, "merged.vrt"))
    geotiff_tmp_path = os.path.abspath(os.path.join(outdir, 'tiles.tmp.tif'))
    geotiff_small_path = os.path.abspath(os.path.join(outdir, 'tiles.small.tif'))
    geotiff_small_filled_path = os.path.abspath(os.path.join(outdir, 'tiles.small_filled.tif'))
    geotiff_path = os.path.abspath(os.path.join(outdir, 'tiles.tif'))

    # Build GeoTIFF
    kwargs = {
        'max_memory': get_max_memory(),
        'threads': max_workers if max_workers else 'ALL_CPUS',
        'tiles_vrt': tiles_vrt_path,
        'merged_vrt': merged_vrt_path,
        'geotiff': geotiff_path,
        'geotiff_tmp': geotiff_tmp_path,
        'geotiff_small': geotiff_small_path,
        'geotiff_small_filled': geotiff_small_filled_path
    }

    if gapfill:
        # Sometimes, for some reason gdal_fillnodata.py
        # behaves strangely when reading data directly from a .VRT
        # so we need to convert to GeoTIFF first.
        run('gdal_translate '
                '-co NUM_THREADS={threads} '
                '-co BIGTIFF=IF_SAFER '
                '--config GDAL_CACHEMAX {max_memory}% '
                '"{tiles_vrt}" "{geotiff_tmp}"'.format(**kwargs))

        # Scale to 10% size
        run('gdal_translate '
                '-co NUM_THREADS={threads} '
                '-co BIGTIFF=IF_SAFER '
                '--config GDAL_CACHEMAX {max_memory}% '
                '-outsize 10% 0 '
                '"{geotiff_tmp}" "{geotiff_small}"'.format(**kwargs))

        # Fill scaled
        # NOTE(review): GDAL's config key is GDAL_CACHEMAX; 'GDAL_CACHE_MAX'
        # below is likely ignored by GDAL — confirm and fix upstream.
        gdal_fillnodata(['.',
                        '-co', 'NUM_THREADS=%s' % kwargs['threads'],
                        '-co', 'BIGTIFF=IF_SAFER',
                        '--config', 'GDAL_CACHE_MAX', str(kwargs['max_memory']) + '%',
                        '-b', '1',
                        '-of', 'GTiff',
                        kwargs['geotiff_small'], kwargs['geotiff_small_filled']])

        # Merge filled scaled DEM with unfilled DEM using bilinear interpolation
        run('gdalbuildvrt -resolution highest -r bilinear "%s" "%s" "%s"' % (merged_vrt_path, geotiff_small_filled_path, geotiff_tmp_path))
        run('gdal_translate '
            '-co NUM_THREADS={threads} '
            '-co TILED=YES '
            '-co BIGTIFF=IF_SAFER '
            '-co COMPRESS=DEFLATE '
            '--config GDAL_CACHEMAX {max_memory}% '
            '"{merged_vrt}" "{geotiff}"'.format(**kwargs))
    else:
        run('gdal_translate '
                '-co NUM_THREADS={threads} '
                '-co TILED=YES '
                '-co BIGTIFF=IF_SAFER '
                '-co COMPRESS=DEFLATE '
                '--config GDAL_CACHEMAX {max_memory}% '
                '"{tiles_vrt}" "{geotiff}"'.format(**kwargs))

    if apply_smoothing:
        median_smoothing(geotiff_path, output_path)
        os.remove(geotiff_path)
    else:
        os.replace(geotiff_path, output_path)

    if os.path.exists(geotiff_tmp_path):
        if not keep_unfilled_copy:
            os.remove(geotiff_tmp_path)
        else:
            os.replace(geotiff_tmp_path, io.related_file_path(output_path, postfix=".unfilled"))

    # Cleanup of intermediate files and tiles
    for cleanup_file in [tiles_vrt_path, tiles_file_list, merged_vrt_path, geotiff_small_path, geotiff_small_filled_path]:
        if os.path.exists(cleanup_file): os.remove(cleanup_file)
    for t in tiles:
        if os.path.exists(t['filename']): os.remove(t['filename'])

    log.ODM_INFO('Completed %s in %s' % (output_file, datetime.now() - start))
def compute_euclidean_map(geotiff_path, output_path, overwrite=False):
    """Compute a euclidean-distance-to-nodata map for *geotiff_path*.

    Uses GDAL's gdal_proximity with the raster's nodata value as the target.

    :param geotiff_path: input raster
    :param output_path: destination raster
    :param overwrite: recompute even if output_path already exists
    :return: output_path on success (or when a previous map is reused), else None
    """
    if not os.path.exists(geotiff_path):
        log.ODM_WARNING("Cannot compute euclidean map (file does not exist: %s)" % geotiff_path)
        return

    nodata = -9999
    with rasterio.open(geotiff_path) as f:
        # NOTE(review): if the raster has no nodata set this becomes None,
        # and str(None) is passed to -values below — confirm inputs always
        # carry a nodata value.
        nodata = f.nodatavals[0]

    if not os.path.exists(output_path) or overwrite:
        log.ODM_INFO("Computing euclidean distance: %s" % output_path)

        # NOTE(review): if both gdal_proximity imports at the top of the module
        # failed, this name may be undefined and raise NameError rather than
        # taking the else branch — the import fallback should bind it to None.
        if gdal_proximity is not None:
            try:
                gdal_proximity(['gdal_proximity.py', geotiff_path, output_path, '-values', str(nodata)])
            except Exception as e:
                log.ODM_WARNING("Cannot compute euclidean distance: %s" % str(e))

            if os.path.exists(output_path):
                return output_path
            else:
                log.ODM_WARNING("Cannot compute euclidean distance file: %s" % output_path)
        else:
            log.ODM_WARNING("Cannot compute euclidean map, gdal_proximity is missing")

    else:
        log.ODM_INFO("Found a euclidean distance map: %s" % output_path)
        return output_path
def median_smoothing(geotiff_path, output_path, smoothing_iterations=1):
    """ Apply median smoothing

    Runs a 9x9 median filter over the first band of *geotiff_path* (one or more
    iterations) and writes the result to *output_path*, preserving nodata cells.

    :param geotiff_path: input raster (must exist)
    :param output_path: destination raster
    :param smoothing_iterations: number of filter passes
    :raises Exception: if the input file does not exist
    :return: output_path
    """
    start = datetime.now()

    if not os.path.exists(geotiff_path):
        raise Exception('File %s does not exist!' % geotiff_path)

    log.ODM_INFO('Starting smoothing...')

    with rasterio.open(geotiff_path) as img:
        nodata = img.nodatavals[0]
        dtype = img.dtypes[0]
        arr = img.read()[0]
        # Remember nodata cells so they can be restored after filtering
        nodata_locs = arr == nodata

        # Median filter (careful, changing the value 5 might require tweaking)
        # the lines below. There's another numpy function that takes care of
        # these edge cases, but it's slower.
        # NOTE(review): the comment above mentions "value 5" but the kernel
        # below uses size=9 — the comment appears stale; confirm.
        for i in range(smoothing_iterations):
            log.ODM_INFO("Smoothing iteration %s" % str(i + 1))
            arr = ndimage.median_filter(arr, size=9, output=dtype, mode='nearest')

        # Median filter leaves a bunch of zeros in nodata areas
        arr[nodata_locs] = nodata

        # write output
        with rasterio.open(output_path, 'w', BIGTIFF="IF_SAFER", **img.profile) as imgout:
            imgout.write(arr, 1)

    log.ODM_INFO('Completed smoothing to create %s in %s' % (output_path, datetime.now() - start))

    return output_path

View File

@ -0,0 +1,84 @@
import numpy as np
from scipy.spatial import Delaunay
from ..point_cloud import PointCloud
EPSILON = 0.00001
class PolyBounds(object):
    """Polygonal 2D bounds backed by a Delaunay triangulation of its vertices."""

    def __init__(self, points):
        self.__points = points
        self.__delaunay = Delaunay(points)
        mins = np.amin(points, axis=0)
        maxs = np.amax(points, axis=0)
        # Stored as (x_min, x_max, y_min, y_max), matching BoxBounds
        self._corners = (mins[0], maxs[0], mins[1], maxs[1])

    def keep_points_inside(self, point_cloud):
        """Return a new point cloud with the points from the given cloud that are inside the bounds"""
        return point_cloud[self.calculate_mask(point_cloud)]

    def percentage_of_points_inside(self, points):
        if isinstance(points, PointCloud):
            points = points.get_xy()
        inside = np.count_nonzero(self.calculate_mask(points))
        return inside * 100 / points.shape[0]

    def calculate_mask(self, points):
        """Calculate the mask that would filter out the points outside the bounds"""
        if isinstance(points, PointCloud):
            points = points.get_xy()
        # A point is inside iff it falls within some simplex of the triangulation
        return self.__delaunay.find_simplex(points) >= 0

    def center(self):
        x_min, x_max, y_min, y_max = self._corners
        return ((x_min + x_max) / 2, (y_min + y_max) / 2)

    def corners(self):
        return self._corners
class BoxBounds(object):
    """Axis-aligned rectangular 2D bounds."""

    def __init__(self, x_min, x_max, y_min, y_max):
        self._corners = (x_min, x_max, y_min, y_max)

    def keep_points_inside(self, point_cloud):
        """Return a new point cloud with the points from the given cloud that are inside the bounds"""
        return point_cloud[self.calculate_mask(point_cloud)]

    def percentage_of_points_inside(self, points):
        if isinstance(points, PointCloud):
            points = points.get_xy()
        inside = np.count_nonzero(self.calculate_mask(points))
        return inside * 100 / points.shape[0]

    def calculate_mask(self, points):
        """Calculate the mask that would filter out the points outside the bounds"""
        if isinstance(points, PointCloud):
            points = points.get_xy()
        x_min, x_max, y_min, y_max = self._corners
        lower = np.array([x_min, y_min])
        upper = np.array([x_max, y_max])
        # Inside iff both coordinates lie within [lower, upper]
        return np.all(np.logical_and(lower <= points, points <= upper), axis=1)

    def center(self):
        x_min, x_max, y_min, y_max = self._corners
        return ((x_min + x_max) / 2, (y_min + y_max) / 2)

    def corners(self):
        return self._corners

    def area(self):
        x_min, x_max, y_min, y_max = self._corners
        return (x_max - x_min) * (y_max - y_min)

    def divide_by_point(self, point):
        """Divide the box into four boxes, marked by the point. It is assumed that the point is inside the box"""
        x_point, y_point = point
        x_min, x_max, y_min, y_max = self._corners
        return [
            BoxBounds(x_min, x_point, y_min, y_point),
            BoxBounds(x_point + EPSILON, x_max, y_min, y_point),
            BoxBounds(x_min, x_point, y_point + EPSILON, y_max),
            BoxBounds(x_point + EPSILON, x_max, y_point + EPSILON, y_max)
        ]

View File

@ -0,0 +1,16 @@
import numpy as np
from scipy.spatial import ConvexHull
from .types import BoxBounds, PolyBounds
def calculate_convex_hull_bounds(points):
    """Return PolyBounds built from the convex hull of *points*."""
    return PolyBounds(points[ConvexHull(points).vertices])
def box_from_point_and_size(center, width, height):
    """Return a BoxBounds of the given width/height centered on *center*."""
    half_width = width / 2
    half_height = height / 2
    return BoxBounds(center[0] - half_width, center[0] + half_width,
                     center[1] - half_height, center[1] + half_height)
def box_from_cloud(point_cloud):
    """Return the axis-aligned BoxBounds enclosing the cloud's XY points."""
    xy = point_cloud.get_xy()
    mins = np.amin(xy, axis=0)
    maxs = np.amax(xy, axis=0)
    return BoxBounds(mins[0], maxs[0], mins[1], maxs[1])

View File

@ -0,0 +1,27 @@
import numpy as np
from abc import ABCMeta, abstractmethod
class Dimension(object, metaclass=ABCMeta):
    """Abstract base class for a per-point dimension that can be computed and
    attached to a point cloud (see _set_values).

    Fix: the original used the Python-2 ``__metaclass__ = ABCMeta`` attribute,
    which is ignored on Python 3 — so @abstractmethod was never enforced.
    Declaring the metaclass with the class keyword restores enforcement.
    """

    def __init__(self):
        super(Dimension, self).__init__()

    @abstractmethod
    def assign(self, *point_clouds, **kwargs):
        "Assign a value to the points on the partition"

    @abstractmethod
    def assign_default(self, point_cloud):
        "Assign a default value"

    @abstractmethod
    def get_name(self):
        "Return the name of the dimension"

    @abstractmethod
    def get_las_type(self):
        "Return the type of the values stored"

    def _set_values(self, point_cloud, values):
        # Delegate storage to the point cloud, keyed by this dimension
        point_cloud.add_dimension(self, values)

View File

@ -0,0 +1,45 @@
class DistanceDimension(Dimension):
    """Assign each point the distance to the estimated ground"""

    def __init__(self):
        super(DistanceDimension, self).__init__()

    def assign_default(self, point_cloud):
        # Unknown distance is flagged with -1
        super(DistanceDimension, self)._set_values(point_cloud, np.full(point_cloud.len(), -1))

    def assign(self, *point_clouds, **kwargs):
        for cloud in point_clouds:
            xy = cloud.get_xy()

            # Calculate RANSCAC model
            model = RANSACRegressor().fit(xy, cloud.get_z())

            # Calculate angle between estimated plane and XY plane
            if self.__calculate_angle(model) >= 45:
                # If the angle is higher than 45 degrees, then don't calculate the difference, since it will probably be way off
                diff = np.full(cloud.len(), 0)
            else:
                diff = cloud.get_z() - model.predict(xy)
                # Ignore the diff when the diff is below the ground
                diff[diff < 0] = 0
            super(DistanceDimension, self)._set_values(cloud, diff)

    def get_name(self):
        return 'distance_to_ground'

    def get_las_type(self):
        return 10

    def __calculate_angle(self, model):
        "Calculate the angle between the estimated plane and the XY plane"
        slope_x = model.estimator_.coef_[0]
        slope_y = model.estimator_.coef_[1]
        return np.degrees(np.arccos(1 / np.sqrt(slope_x ** 2 + slope_y ** 2 + 1)))

View File

@ -0,0 +1,23 @@
class ExtendedDimension(Dimension):
    """Whether the point was added or was already on the original point cloud"""

    def __init__(self):
        super(ExtendedDimension, self).__init__()

    def assign_default(self, point_cloud):
        # Original points are marked with 0
        originals = np.full(point_cloud.len(), 0, dtype=np.uint16)
        super(ExtendedDimension, self)._set_values(point_cloud, originals)

    def assign(self, *point_clouds, **kwargs):
        # Clouds handed to assign() consist of added points, marked with 1
        for cloud in point_clouds:
            additions = np.full(cloud.len(), 1, dtype=np.uint16)
            super(ExtendedDimension, self)._set_values(cloud, additions)

    def get_name(self):
        return 'extended'

    def get_las_type(self):
        return 3

View File

@ -0,0 +1,25 @@
class PartitionDimension(Dimension):
    """Group points by partition"""

    def __init__(self, name):
        super(PartitionDimension, self).__init__()
        # Next partition id to hand out; 0 is reserved for "no partition"
        self.counter = 1
        self.name = name

    def assign_default(self, point_cloud):
        unassigned = np.full(point_cloud.len(), 0)
        super(PartitionDimension, self)._set_values(point_cloud, unassigned)

    def assign(self, *point_clouds, **kwargs):
        # Every cloud passed in one call gets the same partition id
        for cloud in point_clouds:
            labels = np.full(cloud.len(), self.counter)
            super(PartitionDimension, self)._set_values(cloud, labels)
        self.counter += 1

    def get_name(self):
        return self.name

    def get_las_type(self):
        return 5

View File

@ -0,0 +1,32 @@
import numpy as np
from sklearn.neighbors import BallTree
EPSILON = 0.00001
def build_grid(bounds, point_cloud, distance):
    """First, a 2D grid is built with a distance of 'distance' between points, inside the given bounds.
    Then, only points that don't have a point cloud neighbour closer than 'distance' are left. The rest are filtered out."""
    # Candidate grid covering the whole bounding rectangle
    candidates = __build_grid(bounds, distance)
    # Drop candidates outside the (possibly non-rectangular) bounds
    candidates = bounds.keep_points_inside(candidates)
    # Keep only candidates with no existing neighbour within 'distance'
    return __calculate_lonely_points(candidates, point_cloud, distance)
def __build_grid(bounds, distance):
    """Return an (N, 2) array of grid points spaced 'distance' apart covering the bounds."""
    x_min, x_max, y_min, y_max = bounds.corners()
    xs = np.arange(x_min, x_max + distance, distance)
    ys = np.arange(y_min, y_max + distance, distance)
    # x varies slowest, matching the original comprehension order
    return np.array([[x, y] for x in xs for y in ys])
def __calculate_lonely_points(grid, point_cloud, distance):
    """Return the grid points with no point-cloud neighbour within 'distance' (manhattan metric)."""
    # Generate BallTree for point cloud
    tree = BallTree(point_cloud.get_xy(), metric='manhattan')
    # Count neighbors within the radius for every grid point
    neighbor_counts = tree.query_radius(grid, distance - EPSILON, count_only=True)
    # Keep only the points without any neighbor
    return grid[neighbor_counts == 0]

View File

@ -0,0 +1,65 @@
# TODO: Move to pylas when project migrates to python3
from laspy.file import File
from laspy.header import Header
import numpy as np
from ..point_cloud import PointCloud
def read_cloud(point_cloud_path):
    """Read a LAS file with (legacy 1.x) laspy and return (header, PointCloud).

    The header is a tuple (header copy, scale, offset, evlrs, vlrs) in exactly
    the shape consumed by write_cloud().
    """
    # Open point cloud and read its properties
    las_file = File(point_cloud_path, mode='r')
    header = (las_file.header.copy(), las_file.header.scale, las_file.header.offset,las_file.header.evlrs, las_file.header.vlrs)
    [x_scale, y_scale, z_scale] = las_file.header.scale
    [x_offset, y_offset, z_offset] = las_file.header.offset

    # Calculate the real coordinates
    # (raw integer X/Y/Z are scaled+offset per the LAS header)
    x = las_file.X * x_scale + x_offset
    y = las_file.Y * y_scale + y_offset
    z = las_file.Z * z_scale + z_offset

    cloud = PointCloud.with_dimensions(x, y, z, las_file.Classification, las_file.red, las_file.green, las_file.blue)

    # Close the file
    las_file.close()

    # Return the result
    return header, cloud
def write_cloud(header, point_cloud, output_point_cloud_path, write_extra_dimensions=False):
    """Write a PointCloud to a LAS file with (legacy 1.x) laspy.

    :param header: tuple (header copy, scale, offset, evlrs, vlrs) as returned
        by read_cloud()
    :param point_cloud: PointCloud to serialize
    :param output_point_cloud_path: destination LAS path
    :param write_extra_dimensions: also persist the cloud's extra dimensions
        as custom LAS dimensions
    """
    (h, scale, offset, evlrs, vlrs) = header

    # Open output file
    output_las_file = File(output_point_cloud_path, mode='w', header=h, evlrs=evlrs, vlrs=vlrs)

    if write_extra_dimensions:
        # Create new dimensions
        for name, dimension in point_cloud.extra_dimensions_metadata.items():
            output_las_file.define_new_dimension(name=name, data_type=dimension.get_las_type(), description="Dimension added by Ground Extend")

        # Assign dimension values
        for dimension_name, values in point_cloud.extra_dimensions.items():
            setattr(output_las_file, dimension_name, values)

    # Adapt points to scale and offset
    # (real coordinates back to raw integer X/Y/Z per the LAS header)
    [x_scale, y_scale, z_scale] = scale
    [x_offset, y_offset, z_offset] = offset
    [x, y] = np.hsplit(point_cloud.xy, 2)
    output_las_file.X = (x.ravel() - x_offset) / x_scale
    output_las_file.Y = (y.ravel() - y_offset) / y_scale
    output_las_file.Z = (point_cloud.z - z_offset) / z_scale

    # Set color
    [red, green, blue] = np.hsplit(point_cloud.rgb, 3)
    output_las_file.red = red.ravel()
    output_las_file.green = green.ravel()
    output_las_file.blue = blue.ravel()

    # Set classification
    output_las_file.Classification = point_cloud.classification.astype(np.uint8)

    # Set header
    output_las_file.header.scale = scale
    output_las_file.header.offset = offset

    # Close files
    output_las_file.close()

View File

@ -0,0 +1,13 @@
class OnePartition(PartitionPlan):
    """This partition plan does nothing. It returns all the cloud points in one partition."""

    def __init__(self, point_cloud):
        super(OnePartition, self).__init__()
        self.point_cloud = point_cloud

    def execute(self, **kwargs):
        # A single partition bounded by the cloud's own bounding box
        whole_cloud_box = box_from_cloud(self.point_cloud)
        return [Partition(self.point_cloud, bounds=whole_cloud_box)]

Some files were not shown because too many files have changed in this diff Show More