original development tree for Linux kernel GTP module; now long in mainline.
You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

599 lines
14 KiB

fs: Limit sys_mount to only request filesystem modules. Modify the request_module to prefix the file system type with "fs-" and add aliases to all of the filesystems that can be built as modules to match. A common practice is to build all of the kernel code and leave code that is not commonly needed as modules, with the result that many users are exposed to any bug anywhere in the kernel. Looking for filesystems with a fs- prefix limits the pool of possible modules that can be loaded by mount to just filesystems, trivially making things safer with no real cost. Using aliases means user space can control the policy of which filesystem modules are auto-loaded by editing /etc/modprobe.d/*.conf with blacklist and alias directives. Allowing simple, safe, well understood work-arounds to known problematic software. This also addresses a rare but unfortunate problem where the filesystem name is not the same as its module name and module auto-loading would not work. While writing this patch I saw a handful of such cases. The most significant being autofs that lives in the module autofs4. This is relevant to user namespaces because we can reach the request module in get_fs_type() without having any special permissions, and people get uncomfortable when a user specified string (in this case the filesystem type) goes all of the way to request_module. After having looked at this issue I don't think there is any particular reason to perform any filtering or permission checks beyond making it clear in the module request that we want a filesystem module. The common pattern in the kernel is to call request_module() without regard to the user's permissions. In general all a filesystem module does once loaded is call register_filesystem() and go to sleep. Which means there is not much attack surface exposed by loading a filesystem module unless the filesystem is mounted.
In a user namespace filesystems are not mounted unless .fs_flags = FS_USERNS_MOUNT, which most filesystems do not set today. Acked-by: Serge Hallyn <serge.hallyn@canonical.com> Acked-by: Kees Cook <keescook@chromium.org> Reported-by: Kees Cook <keescook@google.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
9 years ago
  1. /*
  2. * Compressed rom filesystem for Linux.
  3. *
  4. * Copyright (C) 1999 Linus Torvalds.
  5. *
  6. * This file is released under the GPL.
  7. */
  8. /*
  9. * These are the VFS interfaces to the compressed rom filesystem.
  10. * The actual compression is based on zlib, see the other files.
  11. */
  12. #include <linux/module.h>
  13. #include <linux/fs.h>
  14. #include <linux/pagemap.h>
  15. #include <linux/init.h>
  16. #include <linux/string.h>
  17. #include <linux/blkdev.h>
  18. #include <linux/cramfs_fs.h>
  19. #include <linux/slab.h>
  20. #include <linux/cramfs_fs_sb.h>
  21. #include <linux/vfs.h>
  22. #include <linux/mutex.h>
  23. #include <asm/uaccess.h>
/* Forward declarations for the operation tables defined at the bottom
   of this file. */
static const struct super_operations cramfs_ops;
static const struct inode_operations cramfs_dir_inode_operations;
static const struct file_operations cramfs_directory_operations;
static const struct address_space_operations cramfs_aops;

/* Serializes all access to the shared read_buffers[] block cache below. */
static DEFINE_MUTEX(read_mutex);

/* These macros may change in future, to provide better st_ino semantics. */
#define OFFSET(x) ((x)->i_ino)
  31. static unsigned long cramino(const struct cramfs_inode *cino, unsigned int offset)
  32. {
  33. if (!cino->offset)
  34. return offset + 1;
  35. if (!cino->size)
  36. return offset + 1;
  37. /*
  38. * The file mode test fixes buggy mkcramfs implementations where
  39. * cramfs_inode->offset is set to a non zero value for entries
  40. * which did not contain data, like devices node and fifos.
  41. */
  42. switch (cino->mode & S_IFMT) {
  43. case S_IFREG:
  44. case S_IFDIR:
  45. case S_IFLNK:
  46. return cino->offset << 2;
  47. default:
  48. break;
  49. }
  50. return offset + 1;
  51. }
/*
 * Look up (or create and initialize) the VFS inode for an on-disk
 * cramfs inode found at byte @offset in the image.  Returns the inode
 * or ERR_PTR(-ENOMEM) if a fresh inode could not be allocated.
 */
static struct inode *get_cramfs_inode(struct super_block *sb,
	const struct cramfs_inode *cramfs_inode, unsigned int offset)
{
	struct inode *inode;
	/* zero-filled static: cramfs stores no timestamps on disk */
	static struct timespec zerotime;

	inode = iget_locked(sb, cramino(cramfs_inode, offset));
	if (!inode)
		return ERR_PTR(-ENOMEM);
	if (!(inode->i_state & I_NEW))
		return inode;	/* already cached and fully set up */

	/* Pick the operation tables from the file-type bits. */
	switch (cramfs_inode->mode & S_IFMT) {
	case S_IFREG:
		inode->i_fop = &generic_ro_fops;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	case S_IFDIR:
		inode->i_op = &cramfs_dir_inode_operations;
		inode->i_fop = &cramfs_directory_operations;
		break;
	case S_IFLNK:
		inode->i_op = &page_symlink_inode_operations;
		inode->i_data.a_ops = &cramfs_aops;
		break;
	default:
		/* device nodes, fifos, sockets: size field holds the
		   old-style encoded dev_t */
		init_special_inode(inode, cramfs_inode->mode,
				old_decode_dev(cramfs_inode->size));
	}

	inode->i_mode = cramfs_inode->mode;
	i_uid_write(inode, cramfs_inode->uid);
	i_gid_write(inode, cramfs_inode->gid);

	/* if the lower 2 bits are zero, the inode contains data */
	if (!(inode->i_ino & 3)) {
		inode->i_size = cramfs_inode->size;
		inode->i_blocks = (cramfs_inode->size - 1) / 512 + 1;
	}

	/* Struct copy intentional */
	inode->i_mtime = inode->i_atime = inode->i_ctime = zerotime;
	/* inode->i_nlink is left 1 - arguably wrong for directories,
	   but it's the best we can do without reading the directory
	   contents. 1 yields the right result in GNU find, even
	   without -noleaf option. */
	unlock_new_inode(inode);
	return inode;
}
/*
 * We have our own block cache: don't fill up the buffer cache
 * with the rom-image, because the way the filesystem is set
 * up the accesses should be fairly regular and cached in the
 * page cache and dentry tree anyway..
 *
 * This also acts as a way to guarantee contiguous areas of up to
 * BLKS_PER_BUF*PAGE_CACHE_SIZE, so that the caller doesn't need to
 * worry about end-of-buffer issues even when decompressing a full
 * page cache.
 */
#define READ_BUFFERS (2)
/* NEXT_BUFFER(): Loop over [0..(READ_BUFFERS-1)]. */
#define NEXT_BUFFER(_ix) ((_ix) ^ 1)

/*
 * BLKS_PER_BUF_SHIFT should be at least 2 to allow for "compressed"
 * data that takes up more space than the original and with unlucky
 * alignment.
 */
#define BLKS_PER_BUF_SHIFT (2)
#define BLKS_PER_BUF (1 << BLKS_PER_BUF_SHIFT)
#define BUFFER_SIZE (BLKS_PER_BUF*PAGE_CACHE_SIZE)

/* Module-global (not per-mount) cache of raw image data, protected by
   read_mutex.  Each entry caches BLKS_PER_BUF consecutive device pages. */
static unsigned char read_buffers[READ_BUFFERS][BUFFER_SIZE];
static unsigned buffer_blocknr[READ_BUFFERS];	/* first cached page index */
static struct super_block * buffer_dev[READ_BUFFERS];	/* owning sb, NULL if unused */
static int next_buffer;	/* round-robin replacement cursor */
/*
 * Returns a pointer to a buffer containing at least LEN bytes of
 * filesystem starting at byte offset OFFSET into the filesystem.
 *
 * NOTE(review): every caller in this file holds read_mutex around this
 * call; that is what makes the shared read_buffers[] cache safe.
 */
static void *cramfs_read(struct super_block *sb, unsigned int offset, unsigned int len)
{
	struct address_space *mapping = sb->s_bdev->bd_inode->i_mapping;
	struct page *pages[BLKS_PER_BUF];
	unsigned i, blocknr, buffer;
	unsigned long devsize;
	char *data;

	if (!len)
		return NULL;
	/* Split the byte offset into a device page number + in-page offset. */
	blocknr = offset >> PAGE_CACHE_SHIFT;
	offset &= PAGE_CACHE_SIZE - 1;

	/* Check if an existing buffer already has the data.. */
	for (i = 0; i < READ_BUFFERS; i++) {
		unsigned int blk_offset;

		if (buffer_dev[i] != sb)
			continue;
		if (blocknr < buffer_blocknr[i])
			continue;
		blk_offset = (blocknr - buffer_blocknr[i]) << PAGE_CACHE_SHIFT;
		blk_offset += offset;
		/* whole request must fit inside the cached window */
		if (blk_offset + len > BUFFER_SIZE)
			continue;
		return read_buffers[i] + blk_offset;
	}

	devsize = mapping->host->i_size >> PAGE_CACHE_SHIFT;

	/* Ok, read in BLKS_PER_BUF pages completely first. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = NULL;

		/* pages past the end of the device stay NULL (read as zeroes) */
		if (blocknr + i < devsize) {
			page = read_mapping_page_async(mapping, blocknr + i,
				NULL);
			/* synchronous error? */
			if (IS_ERR(page))
				page = NULL;
		}
		pages[i] = page;
	}

	/* Wait for the async reads; drop any page that failed. */
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			wait_on_page_locked(page);
			if (!PageUptodate(page)) {
				/* asynchronous error */
				page_cache_release(page);
				pages[i] = NULL;
			}
		}
	}

	/* Claim the next buffer round-robin and record what it will hold. */
	buffer = next_buffer;
	next_buffer = NEXT_BUFFER(buffer);
	buffer_blocknr[buffer] = blocknr;
	buffer_dev[buffer] = sb;

	/* Copy the page data into the contiguous buffer, zero-filling gaps. */
	data = read_buffers[buffer];
	for (i = 0; i < BLKS_PER_BUF; i++) {
		struct page *page = pages[i];

		if (page) {
			memcpy(data, kmap(page), PAGE_CACHE_SIZE);
			kunmap(page);
			page_cache_release(page);
		} else
			memset(data, 0, PAGE_CACHE_SIZE);
		data += PAGE_CACHE_SIZE;
	}
	return read_buffers[buffer] + offset;
}
  191. static void cramfs_put_super(struct super_block *sb)
  192. {
  193. kfree(sb->s_fs_info);
  194. sb->s_fs_info = NULL;
  195. }
/*
 * Remount: cramfs is read-only by construction, so just (re)assert
 * MS_RDONLY and succeed; any option string in @data is ignored.
 */
static int cramfs_remount(struct super_block *sb, int *flags, char *data)
{
	*flags |= MS_RDONLY;
	return 0;
}
/*
 * Read and validate the on-disk superblock (at offset 0 or, failing
 * that, at offset 512), fill in the in-memory cramfs_sb_info, and
 * instantiate the root directory.  Returns 0 on success, -ENOMEM on
 * allocation failure, -EINVAL on any validation failure.
 */
static int cramfs_fill_super(struct super_block *sb, void *data, int silent)
{
	int i;
	struct cramfs_super super;
	unsigned long root_offset;
	struct cramfs_sb_info *sbi;
	struct inode *root;

	/* cramfs is inherently read-only */
	sb->s_flags |= MS_RDONLY;

	sbi = kzalloc(sizeof(struct cramfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;

	/* Invalidate the read buffers on mount: think disk change.. */
	mutex_lock(&read_mutex);
	for (i = 0; i < READ_BUFFERS; i++)
		buffer_blocknr[i] = -1;

	/* Read the first block and get the superblock from it */
	memcpy(&super, cramfs_read(sb, 0, sizeof(super)), sizeof(super));
	mutex_unlock(&read_mutex);

	/* Do sanity checks on the superblock */
	if (super.magic != CRAMFS_MAGIC) {
		/* check for wrong endianness */
		if (super.magic == CRAMFS_MAGIC_WEND) {
			if (!silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			goto out;
		}

		/* check at 512 byte offset */
		mutex_lock(&read_mutex);
		memcpy(&super, cramfs_read(sb, 512, sizeof(super)), sizeof(super));
		mutex_unlock(&read_mutex);
		if (super.magic != CRAMFS_MAGIC) {
			if (super.magic == CRAMFS_MAGIC_WEND && !silent)
				printk(KERN_ERR "cramfs: wrong endianness\n");
			else if (!silent)
				printk(KERN_ERR "cramfs: wrong magic\n");
			goto out;
		}
	}

	/* get feature flags first */
	if (super.flags & ~CRAMFS_SUPPORTED_FLAGS) {
		printk(KERN_ERR "cramfs: unsupported filesystem features\n");
		goto out;
	}

	/* Check that the root inode is in a sane state */
	if (!S_ISDIR(super.root.mode)) {
		printk(KERN_ERR "cramfs: root is not a directory\n");
		goto out;
	}
	/* correct strange, hard-coded permissions of mkcramfs */
	super.root.mode |= (S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP | S_IROTH | S_IXOTH);

	root_offset = super.root.offset << 2;
	if (super.flags & CRAMFS_FLAG_FSID_VERSION_2) {
		sbi->size=super.size;
		sbi->blocks=super.fsid.blocks;
		sbi->files=super.fsid.files;
	} else {
		/* no fsid block in old images: assume the 1<<28 (256 MiB) max */
		sbi->size=1<<28;
		sbi->blocks=0;
		sbi->files=0;
	}
	sbi->magic=super.magic;
	sbi->flags=super.flags;
	if (root_offset == 0)
		printk(KERN_INFO "cramfs: empty filesystem");
	else if (!(super.flags & CRAMFS_FLAG_SHIFTED_ROOT_OFFSET) &&
		 ((root_offset != sizeof(struct cramfs_super)) &&
		  (root_offset != 512 + sizeof(struct cramfs_super))))
	{
		printk(KERN_ERR "cramfs: bad root offset %lu\n", root_offset);
		goto out;
	}

	/* Set it all up.. */
	sb->s_op = &cramfs_ops;
	root = get_cramfs_inode(sb, &super.root, 0);
	if (IS_ERR(root))
		goto out;
	sb->s_root = d_make_root(root);
	if (!sb->s_root)
		goto out;
	return 0;
out:
	/* common failure path: free sbi and report invalid filesystem */
	kfree(sbi);
	sb->s_fs_info = NULL;
	return -EINVAL;
}
  287. static int cramfs_statfs(struct dentry *dentry, struct kstatfs *buf)
  288. {
  289. struct super_block *sb = dentry->d_sb;
  290. u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
  291. buf->f_type = CRAMFS_MAGIC;
  292. buf->f_bsize = PAGE_CACHE_SIZE;
  293. buf->f_blocks = CRAMFS_SB(sb)->blocks;
  294. buf->f_bfree = 0;
  295. buf->f_bavail = 0;
  296. buf->f_files = CRAMFS_SB(sb)->files;
  297. buf->f_ffree = 0;
  298. buf->f_fsid.val[0] = (u32)id;
  299. buf->f_fsid.val[1] = (u32)(id >> 32);
  300. buf->f_namelen = CRAMFS_MAXPATHLEN;
  301. return 0;
  302. }
/*
 * Read a cramfs directory entry.
 *
 * NOTE(review): filp->f_pos is loff_t but is narrowed to unsigned int
 * here; the i_size bound below is what keeps that in range — confirm
 * if directory sizes could ever exceed 32 bits.
 */
static int cramfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
{
	struct inode *inode = file_inode(filp);
	struct super_block *sb = inode->i_sb;
	char *buf;
	unsigned int offset;
	int copied;

	/* Offset within the thing. */
	offset = filp->f_pos;
	if (offset >= inode->i_size)
		return 0;
	/* Directory entries are always 4-byte aligned */
	if (offset & 3)
		return -EINVAL;

	/* Scratch buffer holding one (zero-padded) name at a time. */
	buf = kmalloc(CRAMFS_MAXPATHLEN, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	copied = 0;
	while (offset < inode->i_size) {
		struct cramfs_inode *de;
		unsigned long nextoffset;
		char *name;
		ino_t ino;
		umode_t mode;
		int namelen, error;

		/* Copy everything we need out under the lock: the pointer
		   returned by cramfs_read() may be recycled afterwards. */
		mutex_lock(&read_mutex);
		de = cramfs_read(sb, OFFSET(inode) + offset, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/*
		 * Namelengths on disk are shifted by two
		 * and the name padded out to 4-byte boundaries
		 * with zeroes.
		 */
		namelen = de->namelen << 2;
		memcpy(buf, name, namelen);
		ino = cramino(de, OFFSET(inode) + offset);
		mode = de->mode;
		mutex_unlock(&read_mutex);

		nextoffset = offset + sizeof(*de) + namelen;
		/* Strip the zero padding; an all-zero name means corruption. */
		for (;;) {
			if (!namelen) {
				kfree(buf);
				return -EIO;
			}
			if (buf[namelen-1])
				break;
			namelen--;
		}
		error = filldir(dirent, buf, namelen, offset, ino, mode >> 12);
		if (error)
			break;

		offset = nextoffset;
		filp->f_pos = offset;
		copied++;
	}
	kfree(buf);
	return 0;
}
/*
 * Lookup and fill in the inode data..
 *
 * Linearly scans the raw directory entries; images built with sorted
 * directories allow an early exit once the search key has been passed.
 */
static struct dentry * cramfs_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
{
	unsigned int offset = 0;
	struct inode *inode = NULL;
	int sorted;

	mutex_lock(&read_mutex);
	sorted = CRAMFS_SB(dir->i_sb)->flags & CRAMFS_FLAG_SORTED_DIRS;
	while (offset < dir->i_size) {
		struct cramfs_inode *de;
		char *name;
		int namelen, retval;
		int dir_off = OFFSET(dir) + offset;

		de = cramfs_read(dir->i_sb, dir_off, sizeof(*de)+CRAMFS_MAXPATHLEN);
		name = (char *)(de+1);

		/* Try to take advantage of sorted directories */
		if (sorted && (dentry->d_name.name[0] < name[0]))
			break;

		namelen = de->namelen << 2;
		offset += sizeof(*de) + namelen;

		/* Quick check that the name is roughly the right length */
		if (((dentry->d_name.len + 3) & ~3) != namelen)
			continue;

		/* Strip zero padding; an all-zero name means corruption. */
		for (;;) {
			if (!namelen) {
				inode = ERR_PTR(-EIO);
				goto out;
			}
			if (name[namelen-1])
				break;
			namelen--;
		}
		if (namelen != dentry->d_name.len)
			continue;
		retval = memcmp(dentry->d_name.name, name, namelen);
		if (retval > 0)
			continue;
		if (!retval) {
			inode = get_cramfs_inode(dir->i_sb, de, dir_off);
			break;
		}
		/* else (retval < 0) */
		if (sorted)
			break;
	}
out:
	mutex_unlock(&read_mutex);
	if (IS_ERR(inode))
		return ERR_CAST(inode);
	/* d_add() with a NULL inode caches a negative dentry */
	d_add(dentry, inode);
	return NULL;
}
/*
 * Fill one page-cache page by decompressing the matching block.
 * A file's block-pointer table lives at OFFSET(inode): each 32-bit
 * entry is the END offset of that block's compressed data, so block i
 * spans [ptr[i-1], ptr[i]) and block 0 starts right after the table.
 */
static int cramfs_readpage(struct file *file, struct page * page)
{
	struct inode *inode = page->mapping->host;
	u32 maxblock;
	int bytes_filled;
	void *pgdata;

	maxblock = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	bytes_filled = 0;
	pgdata = kmap(page);
	if (page->index < maxblock) {
		struct super_block *sb = inode->i_sb;
		u32 blkptr_offset = OFFSET(inode) + page->index*4;
		u32 start_offset, compr_len;

		/* block 0 starts immediately after the pointer table */
		start_offset = OFFSET(inode) + maxblock*4;
		mutex_lock(&read_mutex);
		if (page->index)
			start_offset = *(u32 *) cramfs_read(sb, blkptr_offset-4,
				4);
		compr_len = (*(u32 *) cramfs_read(sb, blkptr_offset, 4) -
			start_offset);
		mutex_unlock(&read_mutex);

		if (compr_len == 0)
			; /* hole */
		else if (unlikely(compr_len > (PAGE_CACHE_SIZE << 1))) {
			/* sanity cap: "compressed" data should never exceed
			   two pages; larger means a corrupt pointer table */
			pr_err("cramfs: bad compressed blocksize %u\n",
				compr_len);
			goto err;
		} else {
			mutex_lock(&read_mutex);
			bytes_filled = cramfs_uncompress_block(pgdata,
				PAGE_CACHE_SIZE,
				cramfs_read(sb, start_offset, compr_len),
				compr_len);
			mutex_unlock(&read_mutex);
			if (unlikely(bytes_filled < 0))
				goto err;
		}
	}

	/* Zero-fill the remainder (holes and short final blocks). */
	memset(pgdata + bytes_filled, 0, PAGE_CACHE_SIZE - bytes_filled);
	flush_dcache_page(page);
	kunmap(page);
	SetPageUptodate(page);
	unlock_page(page);
	return 0;

err:
	kunmap(page);
	ClearPageUptodate(page);
	SetPageError(page);
	unlock_page(page);
	return 0;
}
/* Only .readpage is provided: pages are produced by decompression and
   there is no write path. */
static const struct address_space_operations cramfs_aops = {
	.readpage = cramfs_readpage
};
/*
 * Our operations:
 */

/*
 * A directory can only readdir
 */
static const struct file_operations cramfs_directory_operations = {
	.llseek = generic_file_llseek,
	.read = generic_read_dir,
	.readdir = cramfs_readdir,
};

/* Directory inodes only support lookup. */
static const struct inode_operations cramfs_dir_inode_operations = {
	.lookup = cramfs_lookup,
};

/* Super-block operations: teardown, remount (forced read-only), statfs. */
static const struct super_operations cramfs_ops = {
	.put_super = cramfs_put_super,
	.remount_fs = cramfs_remount,
	.statfs = cramfs_statfs,
};
/*
 * Mount entry point: cramfs lives on a block device, so delegate to
 * mount_bdev() with cramfs_fill_super() doing the real work.
 */
static struct dentry *cramfs_mount(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data)
{
	return mount_bdev(fs_type, flags, dev_name, data, cramfs_fill_super);
}
static struct file_system_type cramfs_fs_type = {
	.owner = THIS_MODULE,
	.name = "cramfs",
	.mount = cramfs_mount,
	.kill_sb = kill_block_super,
	.fs_flags = FS_REQUIRES_DEV,	/* must be mounted on a block device */
};
/* "fs-cramfs" alias lets mount(2) auto-load this module while only
   being able to request filesystem modules. */
MODULE_ALIAS_FS("cramfs");
  504. static int __init init_cramfs_fs(void)
  505. {
  506. int rv;
  507. rv = cramfs_uncompress_init();
  508. if (rv < 0)
  509. return rv;
  510. rv = register_filesystem(&cramfs_fs_type);
  511. if (rv < 0)
  512. cramfs_uncompress_exit();
  513. return rv;
  514. }
/*
 * Module exit: free the decompressor state and unregister the
 * filesystem type.
 *
 * NOTE(review): the decompressor is torn down before the filesystem is
 * unregistered; the reverse order would look safer, but this preserves
 * the existing behavior — confirm before reordering.
 */
static void __exit exit_cramfs_fs(void)
{
	cramfs_uncompress_exit();
	unregister_filesystem(&cramfs_fs_type);
}
/* Module registration boilerplate. */
module_init(init_cramfs_fs)
module_exit(exit_cramfs_fs)
MODULE_LICENSE("GPL");