diff --git a/h/HyperQueue/HyperQueue-0.17.0.eb b/h/HyperQueue/HyperQueue-0.17.0.eb
new file mode 100644
index 00000000..3e11d36a
--- /dev/null
+++ b/h/HyperQueue/HyperQueue-0.17.0.eb
@@ -0,0 +1,28 @@
+# IT4Innovations
+# LK 2023
+
+easyblock = 'PackedBinary'
+
+name = 'HyperQueue'
+version = '0.17.0'
+
+homepage = 'https://it4innovations.github.io/hyperqueue/'
+description = """HyperQueue lets you build a computation plan consisting of a large amount of tasks
+ and then execute it transparently over a system like SLURM/PBS. It dynamically groups jobs into SLURM/PBS jobs
+ and distributes them to fully utilize allocated notes.
+ You thus do not have to manually aggregate your tasks into SLURM/PBS jobs."""
+
+toolchain = SYSTEM
+
+source_urls = ['https://github.com/It4innovations/hyperqueue/releases/download/v%(version)s/']
+sources = ['hq-v%(version)s-linux-x64.tar.gz']
+checksums = ['0f96dd47e7bf80f6be5d38dfb6e1e738e7713113dbb73fa14090ec4c3992a6bb']
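+# Upstream ships a prebuilt Linux x64 binary, hence the PackedBinary easyblock
+# and the SYSTEM toolchain (nothing is compiled here).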
+
+sanity_check_paths = {
+    'files': ['hq'],
+    'dirs': [],
+}
+
+sanity_check_commands = ['hq --version']
+
+moduleclass = 'devel'
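
For context, the sanity block above boils down to roughly the following standalone Python sketch. The install prefix is a placeholder; EasyBuild derives the real install directory from `name` and `version`:

```python
import os
import subprocess

# Placeholder prefix; EasyBuild computes the actual installdir from name/version.
prefix = "/apps/all/HyperQueue/0.17.0"

# sanity_check_paths: the unpacked 'hq' binary must sit directly in the prefix.
hq = os.path.join(prefix, "hq")
assert os.path.isfile(hq), f"missing binary: {hq}"

# sanity_check_commands: the binary must run and report its version.
subprocess.run([hq, "--version"], check=True)
```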
diff --git a/o/OpenMPI/OpenMPI-4.1.6-GCC-13.2.0.eb b/o/OpenMPI/OpenMPI-4.1.6-GCC-13.2.0.eb
new file mode 100644
index 00000000..828c6ee0
--- /dev/null
+++ b/o/OpenMPI/OpenMPI-4.1.6-GCC-13.2.0.eb
@@ -0,0 +1,83 @@
+# IT4Innovations
+# LK 2023
+
+name = 'OpenMPI'
+version = '4.1.6'
+
+homepage = 'https://www.open-mpi.org/'
+description = """The Open MPI Project is an open source MPI-3 implementation."""
+
+toolchain = {'name': 'GCC', 'version': '13.2.0'}
+
+source_urls = ['https://www.open-mpi.org/software/ompi/v%(version_major_minor)s/downloads']
+sources = [SOURCELOWER_TAR_BZ2]
+patches = [
+    'OpenMPI-4.1.1_build-with-internal-cuda-header.patch',
+    'OpenMPI-4.1.1_opal-datatype-cuda-performance.patch',
+]
+checksums = [
+    {'openmpi-4.1.6.tar.bz2': 'f740994485516deb63b5311af122c265179f5328a0d857a567b85db00b11e415'},
+    {'OpenMPI-4.1.1_build-with-internal-cuda-header.patch':
+     '63eac52736bdf7644c480362440a7f1f0ae7c7cae47b7565f5635c41793f8c83'},
+    {'OpenMPI-4.1.1_opal-datatype-cuda-performance.patch':
+     'b767c7166cf0b32906132d58de5439c735193c9fd09ec3c5c11db8d5fa68750e'},
+]
+
+builddependencies = [
+    ('pkgconf', '2.0.3'),
+    ('Perl', '5.38.0'),
+    ('Autotools', '20220317'),
+]
+
+dependencies = [
+    ('zlib', '1.2.13'),
+    ('hwloc', '2.9.2'),
+    ('libevent', '2.1.12'),
+    ('UCX', '1.15.0'),
+    ('libfabric', '1.19.0'),
+    ('PMIx', '4.2.6'),
+    ('UCC', '1.2.0'),
+]
+
+# Update configure to include changes from the "internal-cuda" patch
+# by running a subset of autogen.pl sufficient to achieve this
+# without doing the full, long-running regeneration.
+preconfigopts = ' && '.join([
+    'cd config',
+    'autom4te --language=m4sh opal_get_version.m4sh -o opal_get_version.sh',
+    'cd ..',
+    'autoconf',
+    'autoheader',
+    'aclocal',
+    'automake',
+    ''
+])
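+# The trailing '' makes the joined string end in ' && ', so EasyBuild's own
+# './configure' command is chained directly after these regeneration steps.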
+
+# CUDA-related patches and the custom configure option can be removed if CUDA support isn't wanted.
+configopts = '--with-cuda=internal '
+configopts += '--enable-shared --enable-mpi-thread-multiple --with-verbs '
+configopts += '--enable-mpirun-prefix-by-default '
+configopts += '--with-hwloc=$EBROOTHWLOC '  # hwloc support
+configopts += '--with-slurm '  # enable Slurm support
+configopts += '--enable-mpi-cxx '  # enable building the C++ MPI bindings
+configopts += '--with-ucx=$EBROOTUCX '
+
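+# Site-specific Open MPI MCA defaults, selected via the CLUSTERNAME environment
+# variable on the build host (the easyconfig is evaluated at build time).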
+import os
+if os.environ.get("CLUSTERNAME") == "BARBORA":
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
+        'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
+        'OMPI_MCA_orte_base_help_aggregate': '0',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+elif os.environ.get("CLUSTERNAME") == "KAROLINA":
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
+        'OMPI_MCA_orte_base_help_aggregate': '0',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+else:
+    modextravars = {
+        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
+        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
+        'SLURM_MPI_TYPE': 'pmix_v4',
+    }
+
+moduleclass = 'mpi'
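
Because easyconfigs are plain Python evaluated at build time, the CLUSTERNAME branch above is resolved once on the build host, not when the module is loaded. A minimal sketch of that selection logic, factored into a pure function for easier inspection (the function name and the final print are illustrative only):

```python
import os

def select_modextravars(cluster):
    """Return the per-cluster MCA/SLURM defaults used by the easyconfig above."""
    common = {'SLURM_MPI_TYPE': 'pmix_v4'}
    if cluster == "BARBORA":
        return {
            'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
            'OMPI_MCA_btl_tcp_if_include': '10.33.4.0/24',
            'OMPI_MCA_orte_base_help_aggregate': '0',
            **common,
        }
    if cluster == "KAROLINA":
        return {
            'OMPI_MCA_btl_openib_if_include': 'mlx5_0',
            'OMPI_MCA_orte_base_help_aggregate': '0',
            **common,
        }
    # Fallback for any other (or unset) CLUSTERNAME.
    return {
        'OMPI_MCA_btl_openib_if_include': 'mlx4_0',
        'OMPI_MCA_oob_tcp_if_include': '10.0.0.0/8',
        **common,
    }

# The easyconfig reads CLUSTERNAME exactly once, on the build host:
print(select_modextravars(os.environ.get("CLUSTERNAME")))
```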