ulimit

UNIX provides the getrlimit and setrlimit system calls to limit the resources available to processes, and most shells provide a command—such as ulimit—to provide an interface to these settings. Running ulimit -a using a modern Bourne shell will print the current limitations:

$ ulimit -a
core file size          (blocks, -c) unlimited
data seg size           (kbytes, -d) unlimited
scheduling priority             (-e) 0
file size               (blocks, -f) unlimited
pending signals                 (-i) 11724
max locked memory       (kbytes, -l) 64
max memory size         (kbytes, -m) unlimited
open files                      (-n) 1024
pipe size            (512 bytes, -p) 8
POSIX message queues     (bytes, -q) 819200
real-time priority              (-r) 0
stack size              (kbytes, -s) 8192
cpu time               (seconds, -t) unlimited
max user processes              (-u) 11724
virtual memory          (kbytes, -v) unlimited
file locks                      (-x) unlimited

The output above includes resource categories (e.g., file size), an optional unit (e.g., blocks), the flag used to adjust the resource limit (e.g., -f), and the current limit (e.g., unlimited). Limiting the size of any file that can be created by a process run from the current shell to 1,024 blocks is a matter of running:

$ ulimit -f 1024

Similar commands can limit the other resource categories.

The following program is useful for testing one’s understanding of ulimit. Assuming you save this to a file named ulimitest.c, you can compile this program with cc -o ulimitest ulimitest.c. Inspecting the source should give you an indication of what resources the program uses. For example, the lots_of_files function simultaneously opens 128 files; this would exceed the limit set by ulimit -n 64.

#include <sys/stat.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <fcntl.h>
#include <signal.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

/*
 * Test -d, data segment size: a 1 MB global buffer, also used by
 * big_file() as its write source.
 * NOTE(review): an uninitialized global typically lands in .bss rather
 * than .data; whether RLIMIT_DATA accounts for it is platform-dependent
 * — confirm on the target system.
 */
unsigned char buf1[1024 * 1024];

/*
 * Test -s, stack size: consume one stack frame per level of recursion.
 *
 * The original body was a bare tail call, which an optimizing compiler
 * may convert into a loop (tail-call optimization), so the stack would
 * never grow and the test would be meaningless.  The volatile per-frame
 * pad forces each frame to exist, and the store after the recursive
 * call keeps the call from being in tail position.
 */
void
recurse(int n)
{
	volatile unsigned char pad[512];

	pad[0] = (unsigned char)n;
	if (n > 1) {
		recurse(n - 1);
	}
	pad[sizeof pad - 1] = pad[0];
}

/*
 * Test -f, maximum file size: write the 1 MB buf1 to a temporary file.
 *
 * NOTE(review): when a write exceeds RLIMIT_FSIZE the kernel delivers
 * SIGXFSZ, whose default action terminates the process before fwrite
 * can report an error — confirm the expected failure mode when using
 * this to demonstrate ulimit -f.
 */
void
big_file(void)
{
	FILE *f;
	size_t rc;

	f = tmpfile();
	if (NULL == f) {
		perror("error creating temporary file");
		exit(EXIT_FAILURE);
	}

	rc = fwrite(buf1, 1, sizeof buf1, f);
	if (rc != sizeof buf1) {
		perror("error writing 1 MB to file");
		exit(EXIT_FAILURE);
	}

	/*
	 * stdio buffers writes, so a size-limit failure may only surface
	 * when the buffer is flushed at close; check fclose too.
	 */
	if (EOF == fclose(f)) {
		perror("error closing temporary file");
		exit(EXIT_FAILURE);
	}
}

/*
 * Test -u, max user processes: fork 127 children (128 processes
 * counting the parent), park them in sleep(), then kill and reap them.
 *
 * Exits with EXIT_FAILURE if any fork or waitpid fails.
 */
void
lots_of_processes(void)
{
	/* NOTE: parent makes 128. */
	pid_t c[127], rc;
	bool fail = false;
	int nforked = 0;	/* Children successfully created so far. */

	for (int i = 0; i < (int)(sizeof c / sizeof *c); i++) {
		c[i] = fork();
		switch (c[i]) {
		case -1:
			/* Parent (error). */
			perror("error forking");
			fail = true;
			goto done;
		case 0:
			/* Child: wait to be killed by the parent. */
			sleep(100);
			exit(EXIT_SUCCESS);
		default:
			/* Parent. */
			nforked++;
		}
	}

done:
	/*
	 * Only signal and reap the children actually created.  Iterating
	 * over the whole array (as the original did) passes uninitialized
	 * pids to kill() after a fork failure; a garbage value of -1
	 * would SIGKILL every process the user owns.
	 */
	for (int i = 0; i < nforked; i++) {
		kill(c[i], SIGKILL);
	}

	for (int i = 0; i < nforked; i++) {
		rc = waitpid(c[i], NULL, 0);
		if (-1 == rc) {
			perror("error waiting for process");
			exit(EXIT_FAILURE);
		} else if (rc != c[i]) {
			fprintf(stderr, "waited on wrong process\n");
			exit(EXIT_FAILURE);
		}
	}

	if (fail) {
		exit(EXIT_FAILURE);
	}
}

/*
 * Test -n, open files: hold 128 descriptors to /dev/null open
 * simultaneously, then release them all.  Exits with EXIT_FAILURE
 * if any open fails (e.g., because the descriptor limit is hit).
 */
void
lots_of_files(void)
{
	int fds[128];
	const size_t count = sizeof fds / sizeof *fds;

	for (size_t i = 0; i < count; i++) {
		fds[i] = open("/dev/null", O_RDONLY);
		if (fds[i] == -1) {
			perror("error opening file");
			exit(EXIT_FAILURE);
		}
	}

	for (size_t i = 0; i < count; i++) {
		close(fds[i]);
	}
}

/*
 * Run each resource test in turn; prints "finished" if none of them
 * tripped a limit.
 */
int
main(void)
{
	/* Test -s, stack size. */
	recurse(128);

	/* Test -f, maximum file size. */
	big_file();

	/*
	 * Test -m, maximum memory size.  Touch every page so the buffer
	 * is actually backed by memory; the original never wrote to it,
	 * so the compiler was free to elide the allocation entirely.
	 * (NOTE(review): RLIMIT_RSS is not enforced by modern Linux
	 * kernels — confirm on the target system.)
	 */
	volatile unsigned char buf2[1024 * 1024];
	for (size_t i = 0; i < sizeof buf2; i += 4096) {
		buf2[i] = 1;
	}

	/* Test -n, open files. */
	lots_of_files();

	/* Test -u, max user processes. */
	lots_of_processes();

	printf("finished\n");

	exit(EXIT_SUCCESS);
}

The -u limit refers to threads rather than strictly processes, and the limit applies to every program the user has running. Ulimitest will try to simultaneously run 128 processes, but you likely already have a number of processes running (including your shell). Use ps -eLf | grep $USER | wc -l to count the number of threads you are presently running, and take this into account when setting a limit.

My research interests include free and open source software, system security, and network security.