original development tree for Linux kernel GTP module; now long in mainline.
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

444 lines
11 KiB

fs: Limit sys_mount to only request filesystem modules. Modify the request_module to prefix the file system type with "fs-" and add aliases to all of the filesystems that can be built as modules to match. A common practice is to build all of the kernel code and leave code that is not commonly needed as modules, with the result that many users are exposed to any bug anywhere in the kernel. Looking for filesystems with a fs- prefix limits the pool of possible modules that can be loaded by mount to just filesystems trivially making things safer with no real cost. Using aliases means user space can control the policy of which filesystem modules are auto-loaded by editing /etc/modprobe.d/*.conf with blacklist and alias directives. Allowing simple, safe, well understood work-arounds to known problematic software. This also addresses a rare but unfortunate problem where the filesystem name is not the same as its module name and module auto-loading would not work. While writing this patch I saw a handful of such cases. The most significant being autofs that lives in the module autofs4. This is relevant to user namespaces because we can reach the request module in get_fs_type() without having any special permissions, and people get uncomfortable when a user specified string (in this case the filesystem type) goes all of the way to request_module. After having looked at this issue I don't think there is any particular reason to perform any filtering or permission checks beyond making it clear in the module request that we want a filesystem module. The common pattern in the kernel is to call request_module() without regards to the users permissions. In general all a filesystem module does once loaded is call register_filesystem() and go to sleep. Which means there is not much attack surface exposed by loading a filesystem module unless the filesystem is mounted.
In a user namespace filesystems are not mounted unless .fs_flags = FS_USERNS_MOUNT, which most filesystems do not set today. Acked-by: Serge Hallyn <serge.hallyn@canonical.com> Acked-by: Kees Cook <keescook@chromium.org> Reported-by: Kees Cook <keescook@google.com> Signed-off-by: "Eric W. Biederman" <ebiederm@xmission.com>
9 years ago
  1. /*
  2. * QNX4 file system, Linux implementation.
  3. *
  4. * Version : 0.2.1
  5. *
  6. * Using parts of the xiafs filesystem.
  7. *
  8. * History :
  9. *
  10. * 01-06-1998 by Richard Frowijn : first release.
  11. * 20-06-1998 by Frank Denis : Linux 2.1.99+ support, boot signature, misc.
  12. * 30-06-1998 by Frank Denis : first step to write inodes.
  13. */
  14. #include <linux/module.h>
  15. #include <linux/init.h>
  16. #include <linux/slab.h>
  17. #include <linux/highuid.h>
  18. #include <linux/pagemap.h>
  19. #include <linux/buffer_head.h>
  20. #include <linux/writeback.h>
  21. #include <linux/statfs.h>
  22. #include "qnx4.h"
#define QNX4_VERSION 4		/* in-core version stamp kept in sb_info */
#define QNX4_BMNAME ".bitmap"	/* name of the block bitmap file in the root dir */

/* Forward declarations for the super_operations table defined below. */
static const struct super_operations qnx4_sops;
static void qnx4_put_super(struct super_block *sb);
static struct inode *qnx4_alloc_inode(struct super_block *sb);
static void qnx4_destroy_inode(struct inode *inode);
static int qnx4_remount(struct super_block *sb, int *flags, char *data);
static int qnx4_statfs(struct dentry *, struct kstatfs *);
/* Super-block operations: inode lifecycle, unmount, statfs and remount. */
static const struct super_operations qnx4_sops =
{
	.alloc_inode	= qnx4_alloc_inode,
	.destroy_inode	= qnx4_destroy_inode,
	.put_super	= qnx4_put_super,
	.statfs		= qnx4_statfs,
	.remount_fs	= qnx4_remount,
};
  39. static int qnx4_remount(struct super_block *sb, int *flags, char *data)
  40. {
  41. struct qnx4_sb_info *qs;
  42. qs = qnx4_sb(sb);
  43. qs->Version = QNX4_VERSION;
  44. *flags |= MS_RDONLY;
  45. return 0;
  46. }
  47. static int qnx4_get_block( struct inode *inode, sector_t iblock, struct buffer_head *bh, int create )
  48. {
  49. unsigned long phys;
  50. QNX4DEBUG((KERN_INFO "qnx4: qnx4_get_block inode=[%ld] iblock=[%ld]\n",inode->i_ino,iblock));
  51. phys = qnx4_block_map( inode, iblock );
  52. if ( phys ) {
  53. // logical block is before EOF
  54. map_bh(bh, inode->i_sb, phys);
  55. }
  56. return 0;
  57. }
  58. static inline u32 try_extent(qnx4_xtnt_t *extent, u32 *offset)
  59. {
  60. u32 size = le32_to_cpu(extent->xtnt_size);
  61. if (*offset < size)
  62. return le32_to_cpu(extent->xtnt_blk) + *offset - 1;
  63. *offset -= size;
  64. return 0;
  65. }
  66. unsigned long qnx4_block_map( struct inode *inode, long iblock )
  67. {
  68. int ix;
  69. long i_xblk;
  70. struct buffer_head *bh = NULL;
  71. struct qnx4_xblk *xblk = NULL;
  72. struct qnx4_inode_entry *qnx4_inode = qnx4_raw_inode(inode);
  73. u16 nxtnt = le16_to_cpu(qnx4_inode->di_num_xtnts);
  74. u32 offset = iblock;
  75. u32 block = try_extent(&qnx4_inode->di_first_xtnt, &offset);
  76. if (block) {
  77. // iblock is in the first extent. This is easy.
  78. } else {
  79. // iblock is beyond first extent. We have to follow the extent chain.
  80. i_xblk = le32_to_cpu(qnx4_inode->di_xblk);
  81. ix = 0;
  82. while ( --nxtnt > 0 ) {
  83. if ( ix == 0 ) {
  84. // read next xtnt block.
  85. bh = sb_bread(inode->i_sb, i_xblk - 1);
  86. if ( !bh ) {
  87. QNX4DEBUG((KERN_ERR "qnx4: I/O error reading xtnt block [%ld])\n", i_xblk - 1));
  88. return -EIO;
  89. }
  90. xblk = (struct qnx4_xblk*)bh->b_data;
  91. if ( memcmp( xblk->xblk_signature, "IamXblk", 7 ) ) {
  92. QNX4DEBUG((KERN_ERR "qnx4: block at %ld is not a valid xtnt\n", qnx4_inode->i_xblk));
  93. return -EIO;
  94. }
  95. }
  96. block = try_extent(&xblk->xblk_xtnts[ix], &offset);
  97. if (block) {
  98. // got it!
  99. break;
  100. }
  101. if ( ++ix >= xblk->xblk_num_xtnts ) {
  102. i_xblk = le32_to_cpu(xblk->xblk_next_xblk);
  103. ix = 0;
  104. brelse( bh );
  105. bh = NULL;
  106. }
  107. }
  108. if ( bh )
  109. brelse( bh );
  110. }
  111. QNX4DEBUG((KERN_INFO "qnx4: mapping block %ld of inode %ld = %ld\n",iblock,inode->i_ino,block));
  112. return block;
  113. }
  114. static int qnx4_statfs(struct dentry *dentry, struct kstatfs *buf)
  115. {
  116. struct super_block *sb = dentry->d_sb;
  117. u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
  118. buf->f_type = sb->s_magic;
  119. buf->f_bsize = sb->s_blocksize;
  120. buf->f_blocks = le32_to_cpu(qnx4_sb(sb)->BitMap->di_size) * 8;
  121. buf->f_bfree = qnx4_count_free_blocks(sb);
  122. buf->f_bavail = buf->f_bfree;
  123. buf->f_namelen = QNX4_NAME_MAX;
  124. buf->f_fsid.val[0] = (u32)id;
  125. buf->f_fsid.val[1] = (u32)(id >> 32);
  126. return 0;
  127. }
  128. /*
  129. * Check the root directory of the filesystem to make sure
  130. * it really _is_ a qnx4 filesystem, and to check the size
  131. * of the directory entry.
  132. */
  133. static const char *qnx4_checkroot(struct super_block *sb)
  134. {
  135. struct buffer_head *bh;
  136. struct qnx4_inode_entry *rootdir;
  137. int rd, rl;
  138. int i, j;
  139. if (*(qnx4_sb(sb)->sb->RootDir.di_fname) != '/')
  140. return "no qnx4 filesystem (no root dir).";
  141. QNX4DEBUG((KERN_NOTICE "QNX4 filesystem found on dev %s.\n", sb->s_id));
  142. rd = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_blk) - 1;
  143. rl = le32_to_cpu(qnx4_sb(sb)->sb->RootDir.di_first_xtnt.xtnt_size);
  144. for (j = 0; j < rl; j++) {
  145. bh = sb_bread(sb, rd + j); /* root dir, first block */
  146. if (bh == NULL)
  147. return "unable to read root entry.";
  148. rootdir = (struct qnx4_inode_entry *) bh->b_data;
  149. for (i = 0; i < QNX4_INODES_PER_BLOCK; i++, rootdir++) {
  150. QNX4DEBUG((KERN_INFO "rootdir entry found : [%s]\n", rootdir->di_fname));
  151. if (strcmp(rootdir->di_fname, QNX4_BMNAME) != 0)
  152. continue;
  153. qnx4_sb(sb)->BitMap = kmemdup(rootdir,
  154. sizeof(struct qnx4_inode_entry),
  155. GFP_KERNEL);
  156. brelse(bh);
  157. if (!qnx4_sb(sb)->BitMap)
  158. return "not enough memory for bitmap inode";
  159. /* keep bitmap inode known */
  160. return NULL;
  161. }
  162. brelse(bh);
  163. }
  164. return "bitmap file not found.";
  165. }
/*
 * Read and validate the superblock, locate the bitmap inode and
 * instantiate the root directory.  Called from mount via mount_bdev().
 *
 * Error unwinding is order-sensitive and handled by the goto chain at
 * the bottom: outb frees the BitMap copy, out releases the superblock
 * buffer, outnobh frees the sb_info.  Each label falls through to the
 * next, releasing resources in reverse order of acquisition.
 *
 * Returns 0 on success, -ENOMEM/-EINVAL or the qnx4_iget() error code
 * on failure.
 */
static int qnx4_fill_super(struct super_block *s, void *data, int silent)
{
	struct buffer_head *bh;
	struct inode *root;
	const char *errmsg;
	struct qnx4_sb_info *qs;
	int ret = -EINVAL;

	qs = kzalloc(sizeof(struct qnx4_sb_info), GFP_KERNEL);
	if (!qs)
		return -ENOMEM;
	s->s_fs_info = qs;

	sb_set_blocksize(s, QNX4_BLOCK_SIZE);

	/* Check the superblock signature. Since the qnx4 code is
	   dangerous, we should leave as quickly as possible
	   if we don't belong here... */
	bh = sb_bread(s, 1);
	if (!bh) {
		printk(KERN_ERR "qnx4: unable to read the superblock\n");
		goto outnobh;
	}
	if ( le32_to_cpup((__le32*) bh->b_data) != QNX4_SUPER_MAGIC ) {
		if (!silent)
			printk(KERN_ERR "qnx4: wrong fsid in superblock.\n");
		goto out;
	}
	s->s_op = &qnx4_sops;
	s->s_magic = QNX4_SUPER_MAGIC;
	s->s_flags |= MS_RDONLY;	/* Yup, read-only yet */
	qnx4_sb(s)->sb_buf = bh;
	qnx4_sb(s)->sb = (struct qnx4_super_block *) bh->b_data;

	/* check before allocating dentries, inodes, .. */
	errmsg = qnx4_checkroot(s);
	if (errmsg != NULL) {
		if (!silent)
			printk(KERN_ERR "qnx4: %s\n", errmsg);
		goto out;
	}

	/* does root not have inode number QNX4_ROOT_INO ?? */
	root = qnx4_iget(s, QNX4_ROOT_INO * QNX4_INODES_PER_BLOCK);
	if (IS_ERR(root)) {
		printk(KERN_ERR "qnx4: get inode failed\n");
		ret = PTR_ERR(root);
		goto outb;
	}

	ret = -ENOMEM;
	s->s_root = d_make_root(root);
	if (s->s_root == NULL)
		goto outb;

	/*
	 * NOTE(review): the success path drops the buffer reference here
	 * although sb_buf/sb still point into bh->b_data, and put_super
	 * never releases sb_buf.  Nothing in this file dereferences
	 * qs->sb after mount, but this looks fragile — worth confirming
	 * against the rest of the module before relying on those fields.
	 */
	brelse(bh);
	return 0;

      outb:
	kfree(qs->BitMap);
      out:
	brelse(bh);
      outnobh:
	kfree(qs);
	s->s_fs_info = NULL;
	return ret;
}
  225. static void qnx4_put_super(struct super_block *sb)
  226. {
  227. struct qnx4_sb_info *qs = qnx4_sb(sb);
  228. kfree( qs->BitMap );
  229. kfree( qs );
  230. sb->s_fs_info = NULL;
  231. return;
  232. }
  233. static int qnx4_readpage(struct file *file, struct page *page)
  234. {
  235. return block_read_full_page(page,qnx4_get_block);
  236. }
  237. static sector_t qnx4_bmap(struct address_space *mapping, sector_t block)
  238. {
  239. return generic_block_bmap(mapping,block,qnx4_get_block);
  240. }
/* Address-space operations used for regular files and symlinks. */
static const struct address_space_operations qnx4_aops = {
	.readpage	= qnx4_readpage,
	.bmap		= qnx4_bmap
};
  245. struct inode *qnx4_iget(struct super_block *sb, unsigned long ino)
  246. {
  247. struct buffer_head *bh;
  248. struct qnx4_inode_entry *raw_inode;
  249. int block;
  250. struct qnx4_inode_entry *qnx4_inode;
  251. struct inode *inode;
  252. inode = iget_locked(sb, ino);
  253. if (!inode)
  254. return ERR_PTR(-ENOMEM);
  255. if (!(inode->i_state & I_NEW))
  256. return inode;
  257. qnx4_inode = qnx4_raw_inode(inode);
  258. inode->i_mode = 0;
  259. QNX4DEBUG((KERN_INFO "reading inode : [%d]\n", ino));
  260. if (!ino) {
  261. printk(KERN_ERR "qnx4: bad inode number on dev %s: %lu is "
  262. "out of range\n",
  263. sb->s_id, ino);
  264. iget_failed(inode);
  265. return ERR_PTR(-EIO);
  266. }
  267. block = ino / QNX4_INODES_PER_BLOCK;
  268. if (!(bh = sb_bread(sb, block))) {
  269. printk(KERN_ERR "qnx4: major problem: unable to read inode from dev "
  270. "%s\n", sb->s_id);
  271. iget_failed(inode);
  272. return ERR_PTR(-EIO);
  273. }
  274. raw_inode = ((struct qnx4_inode_entry *) bh->b_data) +
  275. (ino % QNX4_INODES_PER_BLOCK);
  276. inode->i_mode = le16_to_cpu(raw_inode->di_mode);
  277. i_uid_write(inode, (uid_t)le16_to_cpu(raw_inode->di_uid));
  278. i_gid_write(inode, (gid_t)le16_to_cpu(raw_inode->di_gid));
  279. set_nlink(inode, le16_to_cpu(raw_inode->di_nlink));
  280. inode->i_size = le32_to_cpu(raw_inode->di_size);
  281. inode->i_mtime.tv_sec = le32_to_cpu(raw_inode->di_mtime);
  282. inode->i_mtime.tv_nsec = 0;
  283. inode->i_atime.tv_sec = le32_to_cpu(raw_inode->di_atime);
  284. inode->i_atime.tv_nsec = 0;
  285. inode->i_ctime.tv_sec = le32_to_cpu(raw_inode->di_ctime);
  286. inode->i_ctime.tv_nsec = 0;
  287. inode->i_blocks = le32_to_cpu(raw_inode->di_first_xtnt.xtnt_size);
  288. memcpy(qnx4_inode, raw_inode, QNX4_DIR_ENTRY_SIZE);
  289. if (S_ISREG(inode->i_mode)) {
  290. inode->i_fop = &generic_ro_fops;
  291. inode->i_mapping->a_ops = &qnx4_aops;
  292. qnx4_i(inode)->mmu_private = inode->i_size;
  293. } else if (S_ISDIR(inode->i_mode)) {
  294. inode->i_op = &qnx4_dir_inode_operations;
  295. inode->i_fop = &qnx4_dir_operations;
  296. } else if (S_ISLNK(inode->i_mode)) {
  297. inode->i_op = &page_symlink_inode_operations;
  298. inode->i_mapping->a_ops = &qnx4_aops;
  299. qnx4_i(inode)->mmu_private = inode->i_size;
  300. } else {
  301. printk(KERN_ERR "qnx4: bad inode %lu on dev %s\n",
  302. ino, sb->s_id);
  303. iget_failed(inode);
  304. brelse(bh);
  305. return ERR_PTR(-EIO);
  306. }
  307. brelse(bh);
  308. unlock_new_inode(inode);
  309. return inode;
  310. }
  311. static struct kmem_cache *qnx4_inode_cachep;
  312. static struct inode *qnx4_alloc_inode(struct super_block *sb)
  313. {
  314. struct qnx4_inode_info *ei;
  315. ei = kmem_cache_alloc(qnx4_inode_cachep, GFP_KERNEL);
  316. if (!ei)
  317. return NULL;
  318. return &ei->vfs_inode;
  319. }
  320. static void qnx4_i_callback(struct rcu_head *head)
  321. {
  322. struct inode *inode = container_of(head, struct inode, i_rcu);
  323. kmem_cache_free(qnx4_inode_cachep, qnx4_i(inode));
  324. }
  325. static void qnx4_destroy_inode(struct inode *inode)
  326. {
  327. call_rcu(&inode->i_rcu, qnx4_i_callback);
  328. }
  329. static void init_once(void *foo)
  330. {
  331. struct qnx4_inode_info *ei = (struct qnx4_inode_info *) foo;
  332. inode_init_once(&ei->vfs_inode);
  333. }
  334. static int init_inodecache(void)
  335. {
  336. qnx4_inode_cachep = kmem_cache_create("qnx4_inode_cache",
  337. sizeof(struct qnx4_inode_info),
  338. 0, (SLAB_RECLAIM_ACCOUNT|
  339. SLAB_MEM_SPREAD),
  340. init_once);
  341. if (qnx4_inode_cachep == NULL)
  342. return -ENOMEM;
  343. return 0;
  344. }
  345. static void destroy_inodecache(void)
  346. {
  347. /*
  348. * Make sure all delayed rcu free inodes are flushed before we
  349. * destroy cache.
  350. */
  351. rcu_barrier();
  352. kmem_cache_destroy(qnx4_inode_cachep);
  353. }
  354. static struct dentry *qnx4_mount(struct file_system_type *fs_type,
  355. int flags, const char *dev_name, void *data)
  356. {
  357. return mount_bdev(fs_type, flags, dev_name, data, qnx4_fill_super);
  358. }
static struct file_system_type qnx4_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "qnx4",
	.mount		= qnx4_mount,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,	/* needs a backing block device */
};
/* Registers the "fs-qnx4" alias so mount(2) can auto-load this module. */
MODULE_ALIAS_FS("qnx4");
  367. static int __init init_qnx4_fs(void)
  368. {
  369. int err;
  370. err = init_inodecache();
  371. if (err)
  372. return err;
  373. err = register_filesystem(&qnx4_fs_type);
  374. if (err) {
  375. destroy_inodecache();
  376. return err;
  377. }
  378. printk(KERN_INFO "QNX4 filesystem 0.2.3 registered.\n");
  379. return 0;
  380. }
/*
 * Module exit: unregister from the VFS first (so no new mounts can
 * start), then tear down the inode cache.
 */
static void __exit exit_qnx4_fs(void)
{
	unregister_filesystem(&qnx4_fs_type);
	destroy_inodecache();
}

module_init(init_qnx4_fs)
module_exit(exit_qnx4_fs)
MODULE_LICENSE("GPL");