/*
 *	Definitions of the socket buffer (struct sk_buff) and of the
 *	inline memory and queue handlers that operate on it.
 */
00014 #ifndef _LINUX_SKBUFF_H
00015 #define _LINUX_SKBUFF_H
00016
00017 #include <linux/config.h>
00018 #include <linux/kernel.h>
00019 #include <linux/sched.h>
00020 #include <linux/time.h>
00021 #include <linux/cache.h>
00022
00023 #include <asm/atomic.h>
00024 #include <asm/types.h>
00025 #include <linux/spinlock.h>
00026 #include <linux/mm.h>
00027 #include <linux/highmem.h>
00028
00029 #define HAVE_ALLOC_SKB
00030 #define HAVE_ALIGNABLE_SKB
00031 #define SLAB_SKB
00032
00033 #define CHECKSUM_NONE 0
00034 #define CHECKSUM_HW 1
00035 #define CHECKSUM_UNNECESSARY 2
00036
00037 #define SKB_DATA_ALIGN(X) (((X) + (SMP_CACHE_BYTES-1)) & ~(SMP_CACHE_BYTES-1))
00038 #define SKB_MAX_ORDER(X,ORDER) (((PAGE_SIZE<<(ORDER)) - (X) - sizeof(struct skb_shared_info))&~(SMP_CACHE_BYTES-1))
00039 #define SKB_MAX_HEAD(X) (SKB_MAX_ORDER((X),0))
00040 #define SKB_MAX_ALLOC (SKB_MAX_ORDER(0,2))
00041
/*
 *	Checksumming of packets (the CHECKSUM_* values above).
 *
 *	On receive:
 *	CHECKSUM_NONE        - the device did not checksum the packet;
 *	                       skb->csum is undefined and the protocol must
 *	                       verify the checksum itself.
 *	CHECKSUM_UNNECESSARY - the device already verified the checksum, so
 *	                       no software check is needed.
 *	CHECKSUM_HW          - the device supplied the raw checksum of the
 *	                       packet data in skb->csum for the protocol to
 *	                       fold and verify.
 *
 *	On transmit:
 *	CHECKSUM_NONE        - the checksum is already complete; the device
 *	                       need not touch it.
 *	CHECKSUM_HW          - the device must checksum the packet from
 *	                       skb->h.raw to the end and store the result at
 *	                       offset skb->csum within that header.
 */
00080 #ifdef __i386__
00081 #define NET_CALLER(arg) (*(((void**)&arg)-1))
00082 #else
00083 #define NET_CALLER(arg) __builtin_return_address(0)
00084 #endif
00085
00086 #ifdef CONFIG_NETFILTER
00087 struct nf_conntrack {
00088 atomic_t use;
00089 void (*destroy)(struct nf_conntrack *);
00090 };
00091
00092 struct nf_ct_info {
00093 struct nf_conntrack *master;
00094 };
00095 #endif
00096
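/*
 *	A queue of socket buffers. next/prev mirror the first two members of
 *	struct sk_buff so the head can act as the sentinel element of the
 *	ring; qlen counts the queued buffers and lock serialises all queue
 *	operations.
 */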
00097 struct sk_buff_head {
00098
00099 struct sk_buff * next;
00100 struct sk_buff * prev;
00101
00102 __u32 qlen;
00103 spinlock_t lock;
00104 };
00105
00106 struct sk_buff;
00107
00108 #define MAX_SKB_FRAGS 6
00109
00110 typedef struct skb_frag_struct skb_frag_t;
00111
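/* One paged fragment of a non-linear skb: size bytes starting at
   page_offset within page. */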
00112 struct skb_frag_struct
00113 {
00114 struct page *page;
00115 __u16 page_offset;
00116 __u16 size;
00117 };
00118
/* This data is invariant across clones and lives at the end of the
   header data, i.e. at skb->end (see the skb_shinfo() macro below). */
00122 struct skb_shared_info {
00123 atomic_t dataref;
00124 unsigned int nr_frags;
00125 struct sk_buff *frag_list;
00126 skb_frag_t frags[MAX_SKB_FRAGS];
00127 };
00128
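/*
 *	A socket buffer describes one packet. The linear data sits between
 *	head and end; data and tail bracket the bytes currently in use.
 *	len is the total data length and data_len the part held in paged
 *	fragments (described by the skb_shared_info at skb->end).
 */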
00129 struct sk_buff {
/* These two members must be first: an sk_buff_head is cast to an
   sk_buff when it is used as the list sentinel. */
00131 struct sk_buff * next;
00132 struct sk_buff * prev;
00133
00134 struct sk_buff_head * list;
00135 struct sock *sk;
00136 struct timeval stamp;
00137 struct net_device *dev;
00138 struct net_device *real_dev;
/* When dev has been replaced by a virtual device (e.g. a bonding or
   VLAN device), real_dev remembers the physical device the packet
   actually arrived on. */

/* Transport layer header */
00144 union
00145 {
00146 struct tcphdr *th;
00147 struct udphdr *uh;
00148 struct icmphdr *icmph;
00149 struct igmphdr *igmph;
00150 struct iphdr *ipiph;
00151 struct spxhdr *spxh;
00152 unsigned char *raw;
00153 } h;
00154
/* Network layer header */
00156 union
00157 {
00158 struct iphdr *iph;
00159 struct ipv6hdr *ipv6h;
00160 struct arphdr *arph;
00161 struct ipxhdr *ipxh;
00162 unsigned char *raw;
00163 } nh;
00164
/* Link layer header */
00166 union
00167 {
00168 struct ethhdr *ethernet;
00169 unsigned char *raw;
00170 } mac;
00171
00172 struct dst_entry *dst;
/*
 *	cb[] is the per-packet control buffer. Any layer is free to use it
 *	for private state while it owns the skb; to keep the contents
 *	across a clone, skb_clone() the buffer first.
 */
00180 char cb[48];
00181
00182 unsigned int len;
00183 unsigned int data_len;
00184 unsigned int csum;
00185 unsigned char __unused,
00186 cloned,
00187 pkt_type,
00188 ip_summed;
00189 __u32 priority;
00190 atomic_t users;
00191 unsigned short protocol;
00192 unsigned short security;
00193 unsigned int truesize;
00194
00195 unsigned char *head;
00196 unsigned char *data;
00197 unsigned char *tail;
00198 unsigned char *end;
00199
00200 void (*destructor)(struct sk_buff *);
00201 #ifdef CONFIG_NETFILTER
/* Mark that netfilter hooks can use to pass information to each other. */
00203 unsigned long nfmark;
00204
00205 __u32 nfcache;
/* Connection tracking state associated with this packet, if any. */
00207 struct nf_ct_info *nfct;
00208 #ifdef CONFIG_NETFILTER_DEBUG
00209 unsigned int nf_debug;
00210 #endif
00211 #endif
00212
00213 #if defined(CONFIG_HIPPI)
00214 union{
00215 __u32 ifield;
00216 } private;
00217 #endif
00218
00219 #ifdef CONFIG_NET_SCHED
00220 __u32 tc_index;
00221 #endif
00222 };
00223
00224 #ifdef __KERNEL__
00225
00226
00227
00228 #include <linux/slab.h>
00229
00230 #include <asm/system.h>
00231
00232 extern void __kfree_skb(struct sk_buff *skb);
00233 extern struct sk_buff * alloc_skb(unsigned int size, int priority);
00234 extern void kfree_skbmem(struct sk_buff *skb);
00235 extern struct sk_buff * skb_clone(struct sk_buff *skb, int priority);
00236 extern struct sk_buff * skb_copy(const struct sk_buff *skb, int priority);
00237 extern struct sk_buff * pskb_copy(struct sk_buff *skb, int gfp_mask);
00238 extern int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail, int gfp_mask);
00239 extern struct sk_buff * skb_realloc_headroom(struct sk_buff *skb, unsigned int headroom);
00240 extern struct sk_buff * skb_copy_expand(const struct sk_buff *skb,
00241 int newheadroom,
00242 int newtailroom,
00243 int priority);
00244 extern struct sk_buff * skb_pad(struct sk_buff *skb, int pad);
00245 #define dev_kfree_skb(a) kfree_skb(a)
00246 extern void skb_over_panic(struct sk_buff *skb, int len, void *here);
00247 extern void skb_under_panic(struct sk_buff *skb, int len, void *here);
00248
00249
00250 #define skb_shinfo(SKB) ((struct skb_shared_info *)((SKB)->end))
00251
00259 static inline int skb_queue_empty(struct sk_buff_head *list)
00260 {
00261 return (list->next == (struct sk_buff *) list);
00262 }
00263
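/* skb_get - take an additional reference to the buffer; release it
   later with kfree_skb(). */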
00272 static inline struct sk_buff *skb_get(struct sk_buff *skb)
00273 {
00274 atomic_inc(&skb->users);
00275 return skb;
00276 }
00277
00278
00279
00280
00281
00282
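/* kfree_skb - release a reference to the buffer and free it once the
   last reference is gone. */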
00291 static inline void kfree_skb(struct sk_buff *skb)
00292 {
00293 if (likely(atomic_read(&skb->users) == 1))
00294 smp_rmb();
00295 else if (likely(!atomic_dec_and_test(&skb->users)))
00296 return;
00297 __kfree_skb(skb);
00298 }
00299
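/* skb_cloned - true if the packet data is shared with a clone and must
   be copied before it can be written. */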
00309 static inline int skb_cloned(struct sk_buff *skb)
00310 {
00311 return skb->cloned && atomic_read(&skb_shinfo(skb)->dataref) != 1;
00312 }
00313
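/* skb_shared - true if more than one reference to this buffer exists. */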
00322 static inline int skb_shared(struct sk_buff *skb)
00323 {
00324 return (atomic_read(&skb->users) != 1);
00325 }
00326
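/* skb_share_check - if the buffer is shared, replace our reference with
   a private clone; returns the clone (or the original buffer), or NULL
   if cloning failed. */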
00341 static inline struct sk_buff *skb_share_check(struct sk_buff *skb, int pri)
00342 {
00343 if (skb_shared(skb)) {
00344 struct sk_buff *nskb;
00345 nskb = skb_clone(skb, pri);
00346 kfree_skb(skb);
00347 return nskb;
00348 }
00349 return skb;
00350 }
00351
00352
00353
00354
00355
00356
00357
00358
00359
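/* skb_unshare - make the packet data writable: a cloned buffer is
   replaced by a full private copy and the old reference dropped; an
   unshared buffer is returned unchanged. */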
00374 static inline struct sk_buff *skb_unshare(struct sk_buff *skb, int pri)
00375 {
00376 struct sk_buff *nskb;
00377 if(!skb_cloned(skb))
00378 return skb;
00379 nskb=skb_copy(skb, pri);
00380 kfree_skb(skb);
00381 return nskb;
00382 }
00383
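/* skb_peek - return the first buffer on the queue without removing it,
   or NULL if the queue is empty; the caller must hold the queue lock
   while using the result. */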
00398 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
00399 {
00400 struct sk_buff *list = ((struct sk_buff *)list_)->next;
00401 if (list == (struct sk_buff *)list_)
00402 list = NULL;
00403 return list;
00404 }
00405
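/* skb_peek_tail - return the last buffer on the queue without removing
   it, or NULL if the queue is empty. */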
00420 static inline struct sk_buff *skb_peek_tail(struct sk_buff_head *list_)
00421 {
00422 struct sk_buff *list = ((struct sk_buff *)list_)->prev;
00423 if (list == (struct sk_buff *)list_)
00424 list = NULL;
00425 return list;
00426 }
00427
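/* skb_queue_len - number of buffers currently on the queue. */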
00435 static inline __u32 skb_queue_len(struct sk_buff_head *list_)
00436 {
00437 return(list_->qlen);
00438 }
00439
00440 static inline void skb_queue_head_init(struct sk_buff_head *list)
00441 {
00442 spin_lock_init(&list->lock);
00443 list->prev = (struct sk_buff *)list;
00444 list->next = (struct sk_buff *)list;
00445 list->qlen = 0;
00446 }
00447
00448
00449
00450
00451
00452
00453
00454
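/* __skb_queue_head - add a buffer at the head of the queue; the caller
   must already hold the queue lock. skb_queue_head() below is the
   locking variant. */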
00466 static inline void __skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
00467 {
00468 struct sk_buff *prev, *next;
00469
00470 newsk->list = list;
00471 list->qlen++;
00472 prev = (struct sk_buff *)list;
00473 next = prev->next;
00474 newsk->next = next;
00475 newsk->prev = prev;
00476 next->prev = newsk;
00477 prev->next = newsk;
00478 }
00479
00480
00493 static inline void skb_queue_head(struct sk_buff_head *list, struct sk_buff *newsk)
00494 {
00495 unsigned long flags;
00496
00497 spin_lock_irqsave(&list->lock, flags);
00498 __skb_queue_head(list, newsk);
00499 spin_unlock_irqrestore(&list->lock, flags);
00500 }
00501
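/* __skb_queue_tail - add a buffer at the end of the queue; the caller
   must hold the queue lock. skb_queue_tail() below takes the lock
   itself. */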
00514 static inline void __skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
00515 {
00516 struct sk_buff *prev, *next;
00517
00518 newsk->list = list;
00519 list->qlen++;
00520 next = (struct sk_buff *)list;
00521 prev = next->prev;
00522 newsk->next = next;
00523 newsk->prev = prev;
00524 next->prev = newsk;
00525 prev->next = newsk;
00526 }
00527
00540 static inline void skb_queue_tail(struct sk_buff_head *list, struct sk_buff *newsk)
00541 {
00542 unsigned long flags;
00543
00544 spin_lock_irqsave(&list->lock, flags);
00545 __skb_queue_tail(list, newsk);
00546 spin_unlock_irqrestore(&list->lock, flags);
00547 }
00548
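/* __skb_dequeue - remove and return the buffer at the head of the
   queue, or NULL if it is empty; caller must hold the lock.
   skb_dequeue() below is the locking variant. */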
00558 static inline struct sk_buff *__skb_dequeue(struct sk_buff_head *list)
00559 {
00560 struct sk_buff *next, *prev, *result;
00561
00562 prev = (struct sk_buff *) list;
00563 next = prev->next;
00564 result = NULL;
00565 if (next != prev) {
00566 result = next;
00567 next = next->next;
00568 list->qlen--;
00569 next->prev = prev;
00570 prev->next = next;
00571 result->next = NULL;
00572 result->prev = NULL;
00573 result->list = NULL;
00574 }
00575 return result;
00576 }
00577
00587 static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
00588 {
00589 unsigned long flags;
00590 struct sk_buff *result;
00591
00592 spin_lock_irqsave(&list->lock, flags);
00593 result = __skb_dequeue(list);
00594 spin_unlock_irqrestore(&list->lock, flags);
00595 return result;
00596 }
00597
00598
00599
00600
00601
00602 static inline void __skb_insert(struct sk_buff *newsk,
00603 struct sk_buff * prev, struct sk_buff *next,
00604 struct sk_buff_head * list)
00605 {
00606 newsk->next = next;
00607 newsk->prev = prev;
00608 next->prev = newsk;
00609 prev->next = newsk;
00610 newsk->list = list;
00611 list->qlen++;
00612 }
00613
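/* skb_insert - place newsk immediately before old on the list old is
   queued on, under that list's lock. */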
00624 static inline void skb_insert(struct sk_buff *old, struct sk_buff *newsk)
00625 {
00626 unsigned long flags;
00627
00628 spin_lock_irqsave(&old->list->lock, flags);
00629 __skb_insert(newsk, old->prev, old, old->list);
00630 spin_unlock_irqrestore(&old->list->lock, flags);
00631 }
00632
00633
00634
00635
00636
00637 static inline void __skb_append(struct sk_buff *old, struct sk_buff *newsk)
00638 {
00639 __skb_insert(newsk, old, old->next, old->list);
00640 }
00641
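/* skb_append - place newsk immediately after old on the list old is
   queued on, under that list's lock. */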
00653 static inline void skb_append(struct sk_buff *old, struct sk_buff *newsk)
00654 {
00655 unsigned long flags;
00656
00657 spin_lock_irqsave(&old->list->lock, flags);
00658 __skb_append(old, newsk);
00659 spin_unlock_irqrestore(&old->list->lock, flags);
00660 }
00661
00662
00663
00664
00665
00666
00667 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
00668 {
00669 struct sk_buff * next, * prev;
00670
00671 list->qlen--;
00672 next = skb->next;
00673 prev = skb->prev;
00674 skb->next = NULL;
00675 skb->prev = NULL;
00676 skb->list = NULL;
00677 next->prev = prev;
00678 prev->next = next;
00679 }
00680
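/* skb_unlink - remove the buffer from whatever list it is on, taking
   that list's lock; a no-op if the buffer is not queued. */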
00694 static inline void skb_unlink(struct sk_buff *skb)
00695 {
00696 struct sk_buff_head *list = skb->list;
00697
00698 if(list) {
00699 unsigned long flags;
00700
00701 spin_lock_irqsave(&list->lock, flags);
00702 if(skb->list == list)
00703 __skb_unlink(skb, skb->list);
00704 spin_unlock_irqrestore(&list->lock, flags);
00705 }
00706 }
00707
00708
00709
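/* __skb_dequeue_tail - remove and return the last buffer on the queue,
   or NULL if it is empty; caller must hold the lock. skb_dequeue_tail()
   below locks the queue itself. */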
00719 static inline struct sk_buff *__skb_dequeue_tail(struct sk_buff_head *list)
00720 {
00721 struct sk_buff *skb = skb_peek_tail(list);
00722 if (skb)
00723 __skb_unlink(skb, list);
00724 return skb;
00725 }
00726
00736 static inline struct sk_buff *skb_dequeue_tail(struct sk_buff_head *list)
00737 {
00738 unsigned long flags;
00739 struct sk_buff *result;
00740
00741 spin_lock_irqsave(&list->lock, flags);
00742 result = __skb_dequeue_tail(list);
00743 spin_unlock_irqrestore(&list->lock, flags);
00744 return result;
00745 }
00746
00747 static inline int skb_is_nonlinear(const struct sk_buff *skb)
00748 {
00749 return skb->data_len;
00750 }
00751
00752 static inline unsigned int skb_headlen(const struct sk_buff *skb)
00753 {
00754 return skb->len - skb->data_len;
00755 }
00756
00757 #define SKB_PAGE_ASSERT(skb) do { if (skb_shinfo(skb)->nr_frags) out_of_line_bug(); } while (0)
00758 #define SKB_FRAG_ASSERT(skb) do { if (skb_shinfo(skb)->frag_list) out_of_line_bug(); } while (0)
00759 #define SKB_LINEAR_ASSERT(skb) do { if (skb_is_nonlinear(skb)) out_of_line_bug(); } while (0)
00760
00761
00762
00763
00764
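/* __skb_put/skb_put - extend the used data area by len bytes at the
   tail and return a pointer to the first added byte; skb_put()
   additionally panics if the buffer would overrun skb->end. Linear
   buffers only. */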
00765 static inline unsigned char *__skb_put(struct sk_buff *skb, unsigned int len)
00766 {
00767 unsigned char *tmp=skb->tail;
00768 SKB_LINEAR_ASSERT(skb);
00769 skb->tail+=len;
00770 skb->len+=len;
00771 return tmp;
00772 }
00773
00784 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
00785 {
00786 unsigned char *tmp=skb->tail;
00787 SKB_LINEAR_ASSERT(skb);
00788 skb->tail+=len;
00789 skb->len+=len;
00790 if(skb->tail>skb->end) {
00791 skb_over_panic(skb, len, current_text_addr());
00792 }
00793 return tmp;
00794 }
00795
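/* __skb_push/skb_push - grow the data area by len bytes at the front
   and return the new skb->data; skb_push() additionally panics if
   skb->data would move before skb->head. */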
00796 static inline unsigned char *__skb_push(struct sk_buff *skb, unsigned int len)
00797 {
00798 skb->data-=len;
00799 skb->len+=len;
00800 return skb->data;
00801 }
00802
00813 static inline unsigned char *skb_push(struct sk_buff *skb, unsigned int len)
00814 {
00815 skb->data-=len;
00816 skb->len+=len;
00817 if(skb->data<skb->head) {
00818 skb_under_panic(skb, len, current_text_addr());
00819 }
00820 return skb->data;
00821 }
00822
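/* __skb_pull/skb_pull - remove len bytes from the front of the data
   area and return the new skb->data; skb_pull() instead returns NULL
   if len exceeds the buffer length. */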
00823 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
00824 {
00825 skb->len-=len;
00826 if (skb->len < skb->data_len)
00827 out_of_line_bug();
00828 return skb->data+=len;
00829 }
00830
00842 static inline unsigned char * skb_pull(struct sk_buff *skb, unsigned int len)
00843 {
00844 if (len > skb->len)
00845 return NULL;
00846 return __skb_pull(skb,len);
00847 }
00848
00849 extern unsigned char * __pskb_pull_tail(struct sk_buff *skb, int delta);
00850
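/* The pskb_* variants also handle non-linear buffers: if the requested
   bytes are not already in the linear header, __pskb_pull_tail() copies
   them in from the fragments first. */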
00851 static inline unsigned char *__pskb_pull(struct sk_buff *skb, unsigned int len)
00852 {
00853 if (len > skb_headlen(skb) &&
00854 __pskb_pull_tail(skb, len-skb_headlen(skb)) == NULL)
00855 return NULL;
00856 skb->len -= len;
00857 return skb->data += len;
00858 }
00859
00860 static inline unsigned char * pskb_pull(struct sk_buff *skb, unsigned int len)
00861 {
00862 if (len > skb->len)
00863 return NULL;
00864 return __pskb_pull(skb,len);
00865 }
00866
00867 static inline int pskb_may_pull(struct sk_buff *skb, unsigned int len)
00868 {
00869 if (len <= skb_headlen(skb))
00870 return 1;
00871 if (len > skb->len)
00872 return 0;
00873 return (__pskb_pull_tail(skb, len-skb_headlen(skb)) != NULL);
00874 }
00875
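/* skb_headroom/skb_tailroom - bytes of free space before skb->data and
   after skb->tail respectively; tailroom is reported as 0 for
   non-linear buffers. */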
00883 static inline int skb_headroom(const struct sk_buff *skb)
00884 {
00885 return skb->data-skb->head;
00886 }
00887
00895 static inline int skb_tailroom(const struct sk_buff *skb)
00896 {
00897 return skb_is_nonlinear(skb) ? 0 : skb->end-skb->tail;
00898 }
00899
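/* skb_reserve - move data and tail forward to open up len bytes of
   headroom in an empty buffer. */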
00909 static inline void skb_reserve(struct sk_buff *skb, unsigned int len)
00910 {
00911 skb->data+=len;
00912 skb->tail+=len;
00913 }
00914
00915 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len, int realloc);
00916
00917 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
00918 {
00919 if (!skb->data_len) {
00920 skb->len = len;
00921 skb->tail = skb->data+len;
00922 } else {
00923 ___pskb_trim(skb, len, 0);
00924 }
00925 }
00926
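/* skb_trim - cut the buffer down to len bytes if it is currently
   longer; pskb_trim() below does the same but may fail (and returns an
   error) when fragment data has to be reallocated. */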
00936 static inline void skb_trim(struct sk_buff *skb, unsigned int len)
00937 {
00938 if (skb->len > len) {
00939 __skb_trim(skb, len);
00940 }
00941 }
00942
00943
00944 static inline int __pskb_trim(struct sk_buff *skb, unsigned int len)
00945 {
00946 if (!skb->data_len) {
00947 skb->len = len;
00948 skb->tail = skb->data+len;
00949 return 0;
00950 } else {
00951 return ___pskb_trim(skb, len, 1);
00952 }
00953 }
00954
00955 static inline int pskb_trim(struct sk_buff *skb, unsigned int len)
00956 {
00957 if (len < skb->len)
00958 return __pskb_trim(skb, len);
00959 return 0;
00960 }
00961
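/* skb_orphan - detach the buffer from its owning socket by running and
   then clearing its destructor. */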
00972 static inline void skb_orphan(struct sk_buff *skb)
00973 {
00974 if (skb->destructor)
00975 skb->destructor(skb);
00976 skb->destructor = NULL;
00977 skb->sk = NULL;
00978 }
00979
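/* skb_queue_purge - dequeue and free every buffer on the queue;
   __skb_queue_purge() below is for callers that already hold the
   queue lock. */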
00990 static inline void skb_queue_purge(struct sk_buff_head *list)
00991 {
00992 struct sk_buff *skb;
00993 while ((skb=skb_dequeue(list))!=NULL)
00994 kfree_skb(skb);
00995 }
00996
01007 static inline void __skb_queue_purge(struct sk_buff_head *list)
01008 {
01009 struct sk_buff *skb;
01010 while ((skb=__skb_dequeue(list))!=NULL)
01011 kfree_skb(skb);
01012 }
01013
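/* __dev_alloc_skb/dev_alloc_skb - allocate a receive buffer with 16
   bytes of headroom reserved so that, after a 14-byte Ethernet header,
   the IP header ends up longword aligned; dev_alloc_skb() uses
   GFP_ATOMIC and may be called from interrupt context. */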
01027 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
01028 int gfp_mask)
01029 {
01030 struct sk_buff *skb;
01031
01032 skb = alloc_skb(length+16, gfp_mask);
01033 if (skb)
01034 skb_reserve(skb,16);
01035 return skb;
01036 }
01037
01051 static inline struct sk_buff *dev_alloc_skb(unsigned int length)
01052 {
01053 return __dev_alloc_skb(length, GFP_ATOMIC);
01054 }
01055
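/* skb_cow - ensure the buffer has at least headroom bytes of spare room
   at the front (16 minimum) and that its header is not shared,
   reallocating the header if necessary; returns 0 on success or a
   negative error. */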
01069 static inline int
01070 skb_cow(struct sk_buff *skb, unsigned int headroom)
01071 {
01072 int delta = (headroom > 16 ? headroom : 16) - skb_headroom(skb);
01073
01074 if (delta < 0)
01075 delta = 0;
01076
01077 if (delta || skb_cloned(skb))
01078 return pskb_expand_head(skb, (delta+15)&~15, 0, GFP_ATOMIC);
01079 return 0;
01080 }
01081
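/* skb_padto - zero-pad the frame to at least len bytes; returns the
   buffer (skb_pad() may reallocate it) or NULL if padding failed. */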
01094 static inline struct sk_buff *skb_padto(struct sk_buff *skb, unsigned int len)
01095 {
01096 unsigned int size = skb->len;
01097 if(likely(size >= len))
01098 return skb;
01099 return skb_pad(skb, len-size);
01100 }
01101
01109 extern int skb_linearize(struct sk_buff *skb, int gfp);
01110
01111 static inline void *kmap_skb_frag(const skb_frag_t *frag)
01112 {
01113 #ifdef CONFIG_HIGHMEM
01114 if (in_irq())
01115 out_of_line_bug();
01116
01117 local_bh_disable();
01118 #endif
01119 return kmap_atomic(frag->page, KM_SKB_DATA_SOFTIRQ);
01120 }
01121
01122 static inline void kunmap_skb_frag(void *vaddr)
01123 {
01124 kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
01125 #ifdef CONFIG_HIGHMEM
01126 local_bh_enable();
01127 #endif
01128 }
01129
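/* skb_queue_walk - iterate skb over every buffer on queue; the queue
   must not be modified while walking it. */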
01130 #define skb_queue_walk(queue, skb) \
01131 for (skb = (queue)->next; \
01132 (skb != (struct sk_buff *)(queue)); \
01133 skb=skb->next)
01134
01135
01136 extern struct sk_buff * skb_recv_datagram(struct sock *sk,unsigned flags,int noblock, int *err);
01137 extern unsigned int datagram_poll(struct file *file, struct socket *sock, struct poll_table_struct *wait);
01138 extern int skb_copy_datagram(const struct sk_buff *from, int offset, char *to,int size);
01139 extern int skb_copy_datagram_iovec(const struct sk_buff *from, int offset, struct iovec *to,int size);
01140 extern int skb_copy_and_csum_datagram(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int *csump);
01141 extern int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb, int hlen, struct iovec *iov);
01142 extern void skb_free_datagram(struct sock * sk, struct sk_buff *skb);
01143
01144 extern unsigned int skb_checksum(const struct sk_buff *skb, int offset, int len, unsigned int csum);
01145 extern int skb_copy_bits(const struct sk_buff *skb, int offset, void *to, int len);
01146 extern unsigned int skb_copy_and_csum_bits(const struct sk_buff *skb, int offset, u8 *to, int len, unsigned int csum);
01147 extern void skb_copy_and_csum_dev(const struct sk_buff *skb, u8 *to);
01148
01149 extern void skb_init(void);
01150 extern void skb_add_mtu(int mtu);
01151
01152 #ifdef CONFIG_NETFILTER
01153 static inline void
01154 nf_conntrack_put(struct nf_ct_info *nfct)
01155 {
01156 if (nfct && atomic_dec_and_test(&nfct->master->use))
01157 nfct->master->destroy(nfct->master);
01158 }
01159 static inline void
01160 nf_conntrack_get(struct nf_ct_info *nfct)
01161 {
01162 if (nfct)
01163 atomic_inc(&nfct->master->use);
01164 }
01165 static inline void
01166 nf_reset(struct sk_buff *skb)
01167 {
01168 nf_conntrack_put(skb->nfct);
01169 skb->nfct = NULL;
01170 #ifdef CONFIG_NETFILTER_DEBUG
01171 skb->nf_debug = 0;
01172 #endif
01173 }
01174 #else
01175 static inline void nf_reset(struct sk_buff *skb) {}
01176 #endif
01177
01178 #endif
01179 #endif