
initial import of incomplete GTP code

Harald Welte committed 8 years ago (branch master, commit 33698306b9)
 include/uapi/linux/udp.h |   2
 net/ipv4/Kconfig         |   8
 net/ipv4/Makefile        |   1
 net/ipv4/gtp.c           | 662
 4 files changed, 673 insertions(+)

include/uapi/linux/udp.h (2 additions)

@@ -34,6 +34,8 @@ struct udphdr {
#define UDP_ENCAP_ESPINUDP_NON_IKE 1 /* draft-ietf-ipsec-nat-t-ike-00/01 */
#define UDP_ENCAP_ESPINUDP 2 /* draft-ietf-ipsec-udp-encaps-06 */
#define UDP_ENCAP_L2TPINUDP 3 /* rfc2661 */
#define UDP_ENCAP_GTP0 4 /* GTP0 according to GSM TS 09.60 */
#define UDP_ENCAP_GTP1U 5 /* GTP1 according to 3GPP TS 29.060 */
#endif /* _UAPI_LINUX_UDP_H */
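
Note (editorial, not part of the patch): the two new encapsulation types are consumed from inside the kernel only; this commit does not extend the UDP_ENCAP setsockopt handling in net/ipv4/udp.c, so userspace cannot select them directly. A module that owns a UDP socket installs the type and a receive hook itself, exactly as gtp_create_bind_sock() does in net/ipv4/gtp.c further down. A minimal sketch of that pattern (the function name here is illustrative, not from the patch):

#include <linux/net.h>
#include <linux/udp.h>
#include <net/udp.h>

/* attach a GTP0 decapsulation hook to a UDP socket owned by this module */
static int example_enable_gtp0_encap(struct socket *sock,
				     int (*rcv)(struct sock *sk,
						struct sk_buff *skb))
{
	struct sock *sk = sock->sk;

	udp_sk(sk)->encap_type = UDP_ENCAP_GTP0;	/* new constant from this hunk */
	udp_sk(sk)->encap_rcv = rcv;			/* called for datagrams arriving on this socket */
	udp_encap_enable();				/* enable the UDP encap receive path */
	return 0;
}

The same encap_type/encap_rcv members are already used by the ESP-in-UDP and L2TP encapsulations listed above this hunk; the sketch only swaps in the new constant.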

net/ipv4/Kconfig (8 additions)

@@ -212,6 +212,14 @@ config NET_IPGRE_BROADCAST
	  Network), but can be distributed all over the Internet. If you want
	  to do that, say Y here and to "IP multicast routing" below.

config NET_GTP
	tristate "IP: GTP tunnels over UDP"
	select NET_IP_TUNNEL
	help
	  The GTP protocol is used extensively in mobile communication
	  networks, specifically (though not only) at the Gp interface
	  between the SGSN and the GGSN in GSM and UMTS networks.

config IP_MROUTE
	bool "IP: multicast routing"
	depends on IP_MULTICAST
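
Usage note (not part of the commit): once this entry is merged, the driver is built like any other tunnel option, and the "select NET_IP_TUNNEL" line pulls in that dependency automatically. A modular build would use the following .config fragment:

# build the GTP tunnel driver as a module
CONFIG_NET_GTP=m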

net/ipv4/Makefile (1 addition)

@@ -22,6 +22,7 @@ obj-$(CONFIG_NET_IPIP) += ipip.o
gre-y := gre_demux.o gre_offload.o
obj-$(CONFIG_NET_IPGRE_DEMUX) += gre.o
obj-$(CONFIG_NET_IPGRE) += ip_gre.o
obj-$(CONFIG_NET_GTP) += gtp.o
obj-$(CONFIG_NET_IPVTI) += ip_vti.o
obj-$(CONFIG_SYN_COOKIES) += syncookies.o
obj-$(CONFIG_INET_AH) += ah4.o

net/ipv4/gtp.c (662 additions, new file)

@@ -0,0 +1,662 @@
/* GTP according to GSM TS 09.60 / 3GPP TS 29.060 */
/* (C) 2012 by sysmocom - s.f.m.c. GmbH
* Author: Harald Welte <hwelte@sysmocom.de>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/rculist.h>
#include <linux/jhash.h>
#include <linux/ipv6.h>
#include <linux/netdevice.h>
#include <net/protocol.h>
#include <net/ip.h>
#include <net/udp.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <net/rtnetlink.h>
/* general GTP protocol related definitions */

#define GTP0_PORT	3386
#define GTP1U_PORT	2152

/* GTP message type 255: G-PDU, i.e. an encapsulated user-plane T-PDU */
#define GTP_GPDU	0xff
struct gtp0_header {		/* According to GSM TS 09.60 */
	uint8_t flags;
	uint8_t type;
	uint16_t length;
	uint16_t seq;
	uint16_t flow;
	uint8_t number;
	uint8_t spare[3];
	uint64_t tid;
} __attribute__ ((packed));

struct gtp1_header_short {	/* According to 3GPP TS 29.060 */
	uint8_t flags;
	uint8_t type;
	uint16_t length;
	uint32_t tei;
} __attribute__ ((packed));
/* implementation-specific definitions */

struct gsn {
	struct list_head list;
};

struct pdp_ctx {
	struct hlist_node hlist;

	uint64_t tid;
	uint8_t gtp_version;
	unsigned short int af;

	union {
		struct in6_addr ip6;
		uint32_t ip4;
	} ms_addr;

	/* user plane and control plane address of remote GSN */
	struct sockaddr remote_c;
	struct sockaddr remote_u;

	uint16_t flow;
	atomic_t tx_seq;
};
/* One local instance of the GTP code base */
struct gtp_instance {
	struct list_head list;

	struct sockaddr_in gtp0_addr;
	struct sockaddr_in gtp1u_addr;

	struct socket *sock0;
	struct socket *sock1u;

	struct net_device *dev;

	/* FIXME: hash / tree of pdp contexts */
	unsigned int hash_size;
	struct hlist_head *tei_hash;
	struct hlist_head *addr_hash;
};
/* hash seed; FIXME: should be initialized to a random value */
static unsigned int gtp_h_initval;

static inline uint32_t gtp0_hashfn(uint64_t tid)
{
	uint32_t *tid32 = (uint32_t *) &tid;
	return jhash_2words(tid32[0], tid32[1], gtp_h_initval);
}

static inline uint32_t gtp1u_hashfn(uint32_t tei)
{
	return jhash_1word(tei, gtp_h_initval);
}

static inline uint32_t ipv4_hashfn(uint32_t ip)
{
	return jhash_1word(ip, gtp_h_initval);
}

static inline uint32_t ipv6_hashfn(struct in6_addr *ip6)
{
	return jhash2((const u32 *)ip6->s6_addr32, sizeof(*ip6) / 4,
		      gtp_h_initval);
}
/* resolve a PDP context structure based on the 64bit TID */
static struct pdp_ctx *gtp0_pdp_find(struct gtp_instance *gti, uint64_t tid)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct pdp_ctx *pdp;

	head = &gti->tei_hash[gtp0_hashfn(tid) % gti->hash_size];

	hlist_for_each_entry_rcu(pdp, pos, head, hlist) {
		if (pdp->gtp_version == 0 && pdp->tid == tid)
			return pdp;
	}

	return NULL;
}
/* resolve a PDP context structure based on the 32bit TEI */
static struct pdp_ctx *gtp1_pdp_find(struct gtp_instance *gti, uint32_t tei)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct pdp_ctx *pdp;

	head = &gti->tei_hash[gtp1u_hashfn(tei) % gti->hash_size];

	hlist_for_each_entry_rcu(pdp, pos, head, hlist) {
		if (pdp->gtp_version == 1 && pdp->tid == tei)
			return pdp;
	}

	return NULL;
}
/* resolve a PDP context based on IPv4 address of MS */
static struct pdp_ctx *ipv4_pdp_find(struct gtp_instance *gti,
				     uint32_t ms_addr)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct pdp_ctx *pdp;

	head = &gti->addr_hash[ipv4_hashfn(ms_addr) % gti->hash_size];

	hlist_for_each_entry_rcu(pdp, pos, head, hlist) {
		if (pdp->af == AF_INET && pdp->ms_addr.ip4 == ms_addr)
			return pdp;
	}

	return NULL;
}
/* resolve a PDP context based on IPv6 address of MS */
static struct pdp_ctx *ipv6_pdp_find(struct gtp_instance *gti,
				     struct in6_addr *ms_addr)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct pdp_ctx *pdp;

	head = &gti->addr_hash[ipv6_hashfn(ms_addr) % gti->hash_size];

	hlist_for_each_entry_rcu(pdp, pos, head, hlist) {
		if (pdp->af == AF_INET6 &&
		    !memcmp(&pdp->ms_addr.ip6, ms_addr, sizeof(*ms_addr)))
			return pdp;
	}

	return NULL;
}
/* resolve the GTP instance for a given sock */
static inline struct gtp_instance *sk_to_gti(struct sock *sk)
{
	struct gtp_instance *gti;

	if (!sk)
		return NULL;

	sock_hold(sk);
	gti = (struct gtp_instance *) sk->sk_user_data;
	if (!gti) {
		sock_put(sk);
		return NULL;
	}

	return gti;
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP */
static int gtp0_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp0_header *gtp0;
	struct gtp_instance *gti;
	struct pdp_ctx *pctx;
	uint64_t tid;

	/* resolve the GTP instance to which the socket belongs */
	gti = sk_to_gti(sk);
	if (!gti)
		goto user;

	/* check for sufficient header size */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + sizeof(*gtp0)))
		goto drop_put;

	/* the GTP header follows the (not yet pulled) UDP header */
	gtp0 = (struct gtp0_header *)(skb->data + sizeof(struct udphdr));

	/* check for GTP Version 0 */
	if ((gtp0->flags >> 5) != 0)
		goto drop_put;

	/* check if it is T-PDU. if not -> userspace */
	if (gtp0->type != GTP_GPDU)
		goto user_put;

	/* look-up the PDP context for the Tunnel ID */
	tid = be64_to_cpu(gtp0->tid);

	rcu_read_lock_bh();
	pctx = gtp0_pdp_find(gti, tid);
	if (!pctx)
		goto drop_put_rcu;

	/* get rid of the UDP and GTP header */
	__skb_pull(skb, sizeof(struct udphdr) + sizeof(*gtp0));

	/* FIXME: check if the inner IP header has the source address
	 * assigned to the current MS */

	/* re-submit via virtual tunnel device into regular network stack */
	secpath_reset(skb);
	skb_dst_drop(skb);
	nf_reset(skb);
	dev_forward_skb(gti->dev, skb);

drop_put_rcu:
	rcu_read_unlock_bh();
drop_put:
	sock_put(sk);
	return 0;
user_put:
	sock_put(sk);
user:
	return 1;
}
/* UDP encapsulation receive handler. See net/ipv4/udp.c.
 * Return codes: 0: success, <0: error, >0: pass up to userspace UDP */
static int gtp1_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct gtp1_header_short *gtp1;
	struct gtp_instance *gti;
	struct pdp_ctx *pctx;
	unsigned int min_len = sizeof(*gtp1);
	uint64_t tid;

	/* resolve the GTP instance to which the socket belongs */
	gti = sk_to_gti(sk);
	if (!gti)
		goto user;

	/* check for sufficient header size */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + sizeof(*gtp1)))
		goto drop_put;

	/* the GTP header follows the (not yet pulled) UDP header */
	gtp1 = (struct gtp1_header_short *)(skb->data + sizeof(struct udphdr));

	/* check for GTP Version 1 */
	if ((gtp1->flags >> 5) != 1)
		goto drop_put;

	/* FIXME: a look-up table might be faster than computing the
	 * length iteratively */

	/* sequence number present */
	if (gtp1->flags & 0x02)
		min_len += 2;
	/* N-PDU number present */
	if (gtp1->flags & 0x01)
		min_len++;
	/* next extension header type present */
	if (gtp1->flags & 0x04)
		min_len += 1;

	/* check if it is T-PDU. */
	if (gtp1->type != GTP_GPDU)
		goto drop_put;

	/* check for sufficient header size */
	if (!pskb_may_pull(skb, sizeof(struct udphdr) + min_len))
		goto drop_put;

	/* pskb_may_pull() may have re-allocated the header */
	gtp1 = (struct gtp1_header_short *)(skb->data + sizeof(struct udphdr));

	/* FIXME: actually take care of extension header chain */

	/* look-up the PDP context for the Tunnel ID */
	tid = ntohl(gtp1->tei);

	rcu_read_lock_bh();
	pctx = gtp1_pdp_find(gti, tid);
	if (!pctx)
		goto drop_put_rcu;

	/* get rid of the UDP and GTP header */
	__skb_pull(skb, sizeof(struct udphdr) + sizeof(*gtp1));

	/* FIXME: check if the inner IP header has the source address
	 * assigned to the current MS */

	/* re-submit via virtual tunnel device into regular network stack */
	secpath_reset(skb);
	skb_dst_drop(skb);
	nf_reset(skb);
	dev_forward_skb(gti->dev, skb);

drop_put_rcu:
	rcu_read_unlock_bh();
drop_put:
	sock_put(sk);
	return 0;
user:
	return 1;
}
static int gtp_dev_init(struct net_device *dev)
{
	struct gtp_instance *gti = netdev_priv(dev);

	dev->header_ops = &gtp_header_ops;	/* ? */

	dev->tstats = alloc_percpu(struct pcpu_tstats);
	if (!dev->tstats)
		return -ENOMEM;

	return 0;
}
static void gtp_dev_uninit(struct net_device *dev)
{
	dev_put(dev);
}
static struct rtable *ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
					   __be32 daddr, __be32 saddr,
					   __u8 tos, int oif);

#define IP_UDP_LEN	(sizeof(struct iphdr) + sizeof(struct udphdr))

static netdev_tx_t gtp_dev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct gtp_instance *gti = netdev_priv(dev);
	struct pdp_ctx *pctx;
	struct pcpu_tstats *tstats;
	struct iphdr *iph, *old_iph;
	struct udphdr *uh;
	struct net_device *tdev;
	struct rtable *rt;
	struct flowi4 fl4;
	unsigned int payload_len;
	unsigned int mtu;
	__be16 df;

	/* read the IP destination address and resolve the PDP context.
	 * Prepend PDP header with TEI/TID from PDP ctx */
	if (skb->protocol == htons(ETH_P_IP)) {
		struct iphdr *inner_iph = ip_hdr(skb);
		rcu_read_lock_bh();
		pctx = ipv4_pdp_find(gti, inner_iph->daddr);
	} else if (skb->protocol == htons(ETH_P_IPV6)) {
		struct ipv6hdr *iph6 = ipv6_hdr(skb);
		rcu_read_lock_bh();
		pctx = ipv6_pdp_find(gti, &iph6->daddr);
	} else {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* no PDP context known for this MS: drop */
	if (!pctx) {
		rcu_read_unlock_bh();
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* FIXME: does this include IP+UDP but not Eth header? */
	payload_len = skb->len;

	if (pctx->gtp_version == 0) {
		struct gtp0_header *gtp0;

		/* ensure there is sufficient headroom */
		skb_cow(skb, sizeof(*gtp0) + IP_UDP_LEN);
		gtp0 = (struct gtp0_header *) skb_push(skb, sizeof(*gtp0));

		gtp0->flags = 0;
		gtp0->type = GTP_GPDU;
		gtp0->length = htons(payload_len);
		gtp0->seq = htons(atomic_inc_return(&pctx->tx_seq) % 0xffff);
		gtp0->flow = htons(pctx->flow);
		gtp0->number = 0xFF;
		gtp0->spare[0] = gtp0->spare[1] = gtp0->spare[2] = 0;
		gtp0->tid = cpu_to_be64(pctx->tid);
	} else if (pctx->gtp_version == 1) {
		struct gtp1_header_short *gtp1u;

		/* ensure there is sufficient headroom */
		skb_cow(skb, sizeof(*gtp1u) + IP_UDP_LEN);
		gtp1u = (struct gtp1_header_short *) skb_push(skb, sizeof(*gtp1u));

		gtp1u->flags = (1 << 5) | 0x10; /* V1, GTP-non-prime */
		gtp1u->type = GTP_GPDU;
		gtp1u->length = htons(payload_len);
		gtp1u->tei = htonl(pctx->tid);
	} else {
		rcu_read_unlock_bh();
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	old_iph = ip_hdr(skb);

	/* new UDP and IP header in front of GTP header */
	skb_push(skb, sizeof(struct udphdr));
	skb_reset_transport_header(skb);
	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED |
			      IPSKB_REROUTED);

	/* obtain a route to the remote GSN before installing the new dst */
	rt = ip4_route_output_gtp(dev_net(dev), &fl4,
				  pctx->remote_u /* FIXME: IPv4 address of remote GSN */,
				  gti->gtp0_addr.sin_addr.s_addr,
				  old_iph->tos,
				  FIXME_link);
	if (IS_ERR(rt)) {
		dev->stats.tx_carrier_errors++;
		goto tx_error;
	}

	tdev = rt->dst.dev;
	if (tdev == dev) {
		ip_rt_put(rt);
		dev->stats.collisions++;
		goto tx_error;
	}

	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	df = gti->frag_off;	/* FIXME: gtp_instance has no frag_off member yet */
	if (df)
		mtu = dst_mtu(&rt->dst) - dev->hard_header_len - tunnel->hlen; /* FIXME: 'tunnel' undefined */
	else
		mtu = skb_dst(skb) ? dst_mtu(skb_dst(skb)) : dev->mtu;

	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (skb->protocol == htons(ETH_P_IP)) {
		df |= (old_iph->frag_off & htons(IP_DF));
		if ((old_iph->frag_off & htons(IP_DF)) &&
		    mtu < ntohs(old_iph->tot_len)) {
			icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
				  htonl(mtu));
			ip_rt_put(rt);
			goto tx_error;
		}
	}
#if IS_ENABLED(CONFIG_IPV6)
	else if (skb->protocol == htons(ETH_P_IPV6)) {
#warning FIXME: implement IPv6
	}
#endif

	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = old_iph->frag_off;
	iph->protocol = IPPROTO_UDP;
	iph->tos = old_iph->tos;
	iph->daddr = fl4.daddr;
	iph->saddr = fl4.saddr;
	iph->ttl = ip4_dst_hoplimit(&rt->dst);

	uh = udp_hdr(skb);
	if (pctx->gtp_version == 0)
		uh->source = uh->dest = htons(GTP0_PORT);
	else
		uh->source = uh->dest = htons(GTP1U_PORT);
	/* UDP length covers the UDP header, GTP header and payload */
	uh->len = htons(skb->len - sizeof(struct iphdr));
	uh->check = 0;

	rcu_read_unlock_bh();

	nf_reset(skb);
	tstats = this_cpu_ptr(dev->tstats);
	__IPTUNNEL_XMIT(tstats, &dev->stats);

	return NETDEV_TX_OK;

tx_error:
	rcu_read_unlock_bh();
	dev->stats.tx_errors++;
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
static struct rtable *
ip4_route_output_gtp(struct net *net, struct flowi4 *fl4,
		     __be32 daddr, __be32 saddr, __u8 tos, int oif)
{
	memset(fl4, 0, sizeof(*fl4));
	fl4->flowi4_oif = oif;
	fl4->daddr = daddr;
	fl4->saddr = saddr;
	fl4->flowi4_tos = tos;
	fl4->flowi4_proto = IPPROTO_UDP;

	return ip_route_output_key(net, fl4);
}
static const struct net_device_ops gtp_netdev_ops = {
	.ndo_init	= gtp_dev_init,
	.ndo_uninit	= gtp_dev_uninit,
	.ndo_start_xmit	= gtp_dev_xmit,
};

static const struct nla_policy gtp_link_policy[IFLA_GTP_MAX + 1] = {
	/* FIXME */
};
static void gtp_link_setup(struct net_device *dev)
{
	dev->netdev_ops		= &gtp_netdev_ops;
	dev->destructor		= gtp_dev_free;
	dev->type		= FIXME;
	dev->needed_headroom	= FIXME;
	dev->mtu		= FIXME;
	dev->flags		= IFF_NOARP;
	dev->iflink		= 0;
	dev->addr_len		= FIXME;
	dev->features		|= NETIF_F_NETNS_LOCAL;		/* ? */
	dev->priv_flags		&= ~IFF_XMIT_DST_RELEASE;	/* ? */
}
static int gtp_link_validate(struct nlattr *tb[], struct nlattr *data[])
{
	/* FIXME */
	return 0;
}
static int gtp_newlink(struct net *src_net, struct net_device *dev,
		       struct nlattr *tb[], struct nlattr *data[])
{
	int rc;

	/* FIXME: local IP address for GTP UDP sockets */

	rc = register_netdevice(dev);
	if (rc)
		return rc;

	return 0;
}
static int gtp_changelink(struct net_device *dev, struct nlattr *tb[],
			  struct nlattr *data[])
{
	/* FIXME: local IP address for GTP UDP sockets */
	return 0;
}

static size_t gtp_link_get_size(const struct net_device *dev)
{
	return 0; /* FIXME */
}

static int gtp_link_fill_info(struct sk_buff *skb, const struct net_device *dev)
{
	struct gtp_instance *gti = netdev_priv(dev);

	/* FIXME: local IP address for GTP UDP sockets */
	return 0;
}
static struct rtnl_link_ops gtp_link_ops __read_mostly = {
	.kind		= "gtp",
	.maxtype	= IFLA_GTP_MAX,
	.policy		= gtp_link_policy,
	.priv_size	= sizeof(struct gtp_instance),
	.setup		= gtp_link_setup,
	/* .validate	= gtp_link_validate, */
	.newlink	= gtp_newlink,
	.changelink	= gtp_changelink,
	.get_size	= gtp_link_get_size,
	.fill_info	= gtp_link_fill_info,
};
static int gtp_create_bind_sock(struct gtp_instance *gti)
{
	int rc;
	struct sockaddr_in sin;
	struct sock *sk;

	/* Create and bind the socket for GTP0 */
	rc = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &gti->sock0);
	if (rc < 0)
		goto out;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(GTP0_PORT);
	rc = kernel_bind(gti->sock0, (struct sockaddr *) &sin, sizeof(sin));
	if (rc < 0)
		goto out_free0;

	sk = gti->sock0->sk;
	udp_sk(sk)->encap_type = UDP_ENCAP_GTP0;
	udp_sk(sk)->encap_rcv = gtp0_udp_encap_recv;
	udp_encap_enable();

	/* Create and bind the socket for GTP1 user-plane */
	rc = sock_create(AF_INET, SOCK_DGRAM, IPPROTO_UDP, &gti->sock1u);
	if (rc < 0)
		goto out_free0;

	memset(&sin, 0, sizeof(sin));
	sin.sin_family = AF_INET;
	sin.sin_port = htons(GTP1U_PORT);
	rc = kernel_bind(gti->sock1u, (struct sockaddr *) &sin, sizeof(sin));
	if (rc < 0)
		goto out_free1;

	sk = gti->sock1u->sk;
	udp_sk(sk)->encap_type = UDP_ENCAP_GTP1U;
	udp_sk(sk)->encap_rcv = gtp1_udp_encap_recv;

	return 0;

out_free1:
	sock_release(gti->sock1u);
out_free0:
	sock_release(gti->sock0);
out:
	return rc;
}
static int __init gtp_init(void)
{
	int rc;

	rc = rtnl_link_register(&gtp_link_ops);
	if (rc < 0)
		return rc;

	return 0;
}
static void __exit gtp_fini(void)
{
	rtnl_link_unregister(&gtp_link_ops);
}
module_init(gtp_init);
module_exit(gtp_fini);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <hwelte@sysmocom.de>");
MODULE_ALIAS_RTNL_LINK("gtp");
MODULE_ALIAS_NETDEV("gtp0");
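
For reference, the encapsulation parsed by gtp1_udp_encap_recv() above can be exercised from an ordinary userspace socket: prepend the eight-byte GTP-U header (version 1, PT=1, message type G-PDU) to an IP packet and send it to UDP port 2152. A standalone sketch, not part of the commit; the destination address handling is generic and the TEID below is a made-up example value:

/* userspace: emit one GTP-U (v1) T-PDU towards a GTP endpoint */
#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

struct gtp1_hdr {			/* mirrors struct gtp1_header_short above */
	uint8_t flags;
	uint8_t type;
	uint16_t length;
	uint32_t tei;
} __attribute__((packed));

static int send_tpdu(const char *dst_ip, const void *payload, size_t len)
{
	uint8_t buf[sizeof(struct gtp1_hdr) + 1500];
	struct gtp1_hdr *h = (struct gtp1_hdr *)buf;
	struct sockaddr_in dst = {
		.sin_family = AF_INET,
		.sin_port = htons(2152),	/* GTP1U_PORT */
	};
	int fd;
	ssize_t rc;

	if (len > 1500)
		return -1;
	h->flags = 0x30;		/* version 1, GTP (not GTP'), no optional fields */
	h->type = 0xff;			/* G-PDU: encapsulated user-plane T-PDU */
	h->length = htons(len);		/* payload length after the mandatory 8-byte header */
	h->tei = htonl(0x2a);		/* made-up tunnel endpoint identifier */
	memcpy(buf + sizeof(*h), payload, len);

	if (inet_pton(AF_INET, dst_ip, &dst.sin_addr) != 1)
		return -1;
	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -1;
	rc = sendto(fd, buf, sizeof(*h) + len, 0,
		    (struct sockaddr *)&dst, sizeof(dst));
	close(fd);
	return rc < 0 ? -1 : 0;
}

A datagram built this way and delivered to a socket bound by gtp_create_bind_sock() would be matched against the TEI hash table and, if a PDP context with that TEID exists, decapsulated and forwarded out of the virtual GTP network device.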