sched.h
/*
 * linux/include/linux/sunrpc/sched.h
 *
 * Scheduling primitives for kernel Sun RPC.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */

#ifndef _LINUX_SUNRPC_SCHED_H_
#define _LINUX_SUNRPC_SCHED_H_

#include <linux/timer.h>
#include <linux/sunrpc/types.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/sunrpc/xdr.h>

/*
 * This is the actual RPC procedure call info.
 */
struct rpc_procinfo;
struct rpc_message {
        struct rpc_procinfo *   rpc_proc;       /* Procedure information */
        void *                  rpc_argp;       /* Arguments */
        void *                  rpc_resp;       /* Result */
        struct rpc_cred *       rpc_cred;       /* Credentials */
};
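
/*
 * Illustrative sketch only (not part of this header): a caller typically
 * fills in an rpc_message before handing it to the RPC client layer.
 * The helper name and the proc/args/res/cred parameters are hypothetical.
 */
#if 0
static void example_fill_message(struct rpc_message *msg,
                                 struct rpc_procinfo *proc,
                                 void *args, void *res,
                                 struct rpc_cred *cred)
{
        msg->rpc_proc = proc;   /* procedure information */
        msg->rpc_argp = args;   /* arguments */
        msg->rpc_resp = res;    /* result */
        msg->rpc_cred = cred;   /* credentials */
}
#endif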

struct rpc_call_ops;
struct rpc_wait_queue;

struct rpc_wait {
        struct list_head        list;           /* wait queue links */
        struct list_head        links;          /* Links to related tasks */
        struct rpc_wait_queue * rpc_waitq;      /* RPC wait queue we're on */
};

/*
 * This is the RPC task struct
 */
struct rpc_task {
#ifdef RPC_DEBUG
        unsigned long           tk_magic;       /* 0xf00baa */
#endif
        atomic_t                tk_count;       /* Reference count */
        struct list_head        tk_task;        /* global list of tasks */
        struct rpc_clnt *       tk_client;      /* RPC client */
        struct rpc_rqst *       tk_rqstp;       /* RPC request */
        int                     tk_status;      /* result of last operation */

        /*
         * RPC call state
         */
        struct rpc_message      tk_msg;         /* RPC call info */
        __u8                    tk_garb_retry;
        __u8                    tk_cred_retry;

        unsigned long           tk_cookie;      /* Cookie for batching tasks */

        /*
         * timeout_fn   to be executed by timer bottom half
         * callback     to be executed after waking up
         * action       next procedure for async tasks
         * tk_ops       caller callbacks
         */
        void                    (*tk_timeout_fn)(struct rpc_task *);
        void                    (*tk_callback)(struct rpc_task *);
        void                    (*tk_action)(struct rpc_task *);
        const struct rpc_call_ops *tk_ops;
        void *                  tk_calldata;

        /*
         * tk_timer is used for async processing by the RPC scheduling
         * primitives. You should not access this directly unless
         * you have a pathological interest in kernel oopses.
         */
        struct timer_list       tk_timer;       /* kernel timer */
        unsigned long           tk_timeout;     /* timeout for rpc_sleep() */
        unsigned short          tk_flags;       /* misc flags */
        unsigned char           tk_priority : 2; /* Task priority */
        unsigned long           tk_runstate;    /* Task run status */
        struct workqueue_struct *tk_workqueue;  /* Normally rpciod, but could
                                                 * be any workqueue
                                                 */
        union {
                struct work_struct      tk_work;        /* Async task work queue */
                struct rpc_wait         tk_wait;        /* RPC wait */
                struct rcu_head         tk_rcu;         /* for task deletion */
        } u;

        unsigned short          tk_timeouts;    /* maj timeouts */
        size_t                  tk_bytes_sent;  /* total bytes sent */
        unsigned long           tk_start;       /* RPC task init timestamp */
        long                    tk_rtt;         /* round-trip time (jiffies) */

#ifdef RPC_DEBUG
        unsigned short          tk_pid;         /* debugging aid */
#endif
};
#define tk_auth                 tk_client->cl_auth
#define tk_xprt                 tk_client->cl_xprt
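
/*
 * Illustrative sketch only (not part of this header): async RPC state
 * machines advance by storing the next step in tk_action; the scheduler
 * keeps calling the current tk_action until it becomes NULL.  The step
 * functions below are hypothetical.
 */
#if 0
static void example_step_two(struct rpc_task *task);

static void example_step_one(struct rpc_task *task)
{
        /* do some work, then select the next state */
        task->tk_action = example_step_two;
}

static void example_step_two(struct rpc_task *task)
{
        if (task->tk_status < 0) {
                rpc_exit(task, task->tk_status);        /* finish with an error */
                return;
        }
        task->tk_action = NULL;                         /* state machine is done */
}
#endif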

/* support walking a list of tasks on a wait queue */
#define task_for_each(task, pos, head) \
        list_for_each(pos, head) \
                if ((task=list_entry(pos, struct rpc_task, u.tk_wait.list)),1)

#define task_for_first(task, head) \
        if (!list_empty(head) && \
                ((task=list_entry((head)->next, struct rpc_task, u.tk_wait.list)),1))

/* .. and walking list of all tasks */
#define alltask_for_each(task, pos, head) \
        list_for_each(pos, head) \
                if ((task=list_entry(pos, struct rpc_task, tk_task)),1)
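
/*
 * Illustrative sketch only (not part of this header): the walker macros
 * above are normally used with the wait queue lock held.  The function,
 * its printk, and the choice of walking only the lowest-priority list
 * are hypothetical.
 */
#if 0
static void example_dump_queue(struct rpc_wait_queue *queue)
{
        struct rpc_task *task;
        struct list_head *pos;

        spin_lock_bh(&queue->lock);
        task_for_each(task, pos, &queue->tasks[0])
                printk("task %p has status %d\n", task, task->tk_status);
        spin_unlock_bh(&queue->lock);
}
#endif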

typedef void                    (*rpc_action)(struct rpc_task *);

struct rpc_call_ops {
        void (*rpc_call_prepare)(struct rpc_task *, void *);
        void (*rpc_call_done)(struct rpc_task *, void *);
        void (*rpc_release)(void *);
};
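
/*
 * Illustrative sketch only (not part of this header): callers supply an
 * rpc_call_ops table plus opaque calldata when starting a task;
 * rpc_call_done() runs on completion and rpc_release() frees the
 * calldata.  The names below are hypothetical.
 */
#if 0
static void example_call_done(struct rpc_task *task, void *calldata)
{
        if (task->tk_status < 0)
                printk("example: call failed with %d\n", task->tk_status);
}

static void example_release(void *calldata)
{
        kfree(calldata);
}

static const struct rpc_call_ops example_call_ops = {
        .rpc_call_done  = example_call_done,
        .rpc_release    = example_release,
};
#endif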

/*
 * RPC task flags
 */
#define RPC_TASK_ASYNC          0x0001          /* is an async task */
#define RPC_TASK_SWAPPER        0x0002          /* is swapping in/out */
#define RPC_CALL_MAJORSEEN      0x0020          /* major timeout seen */
#define RPC_TASK_ROOTCREDS      0x0040          /* force root creds */
#define RPC_TASK_DYNAMIC        0x0080          /* task was kmalloc'ed */
#define RPC_TASK_KILLED         0x0100          /* task was killed */
#define RPC_TASK_SOFT           0x0200          /* Use soft timeouts */
#define RPC_TASK_NOINTR         0x0400          /* uninterruptible task */

#define RPC_IS_ASYNC(t)         ((t)->tk_flags & RPC_TASK_ASYNC)
#define RPC_IS_SWAPPER(t)       ((t)->tk_flags & RPC_TASK_SWAPPER)
#define RPC_DO_ROOTOVERRIDE(t)  ((t)->tk_flags & RPC_TASK_ROOTCREDS)
#define RPC_ASSASSINATED(t)     ((t)->tk_flags & RPC_TASK_KILLED)
#define RPC_DO_CALLBACK(t)      ((t)->tk_callback != NULL)
#define RPC_IS_SOFT(t)          ((t)->tk_flags & RPC_TASK_SOFT)
#define RPC_TASK_UNINTERRUPTIBLE(t) ((t)->tk_flags & RPC_TASK_NOINTR)

#define RPC_TASK_RUNNING        0
#define RPC_TASK_QUEUED         1
#define RPC_TASK_WAKEUP         2
#define RPC_TASK_HAS_TIMER      3
#define RPC_TASK_ACTIVE         4

#define RPC_IS_RUNNING(t)       test_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_set_running(t)      set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_test_and_set_running(t) \
                                test_and_set_bit(RPC_TASK_RUNNING, &(t)->tk_runstate)
#define rpc_clear_running(t)    \
        do { \
                smp_mb__before_clear_bit(); \
                clear_bit(RPC_TASK_RUNNING, &(t)->tk_runstate); \
                smp_mb__after_clear_bit(); \
        } while (0)

#define RPC_IS_QUEUED(t)        test_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_set_queued(t)       set_bit(RPC_TASK_QUEUED, &(t)->tk_runstate)
#define rpc_clear_queued(t)     \
        do { \
                smp_mb__before_clear_bit(); \
                clear_bit(RPC_TASK_QUEUED, &(t)->tk_runstate); \
                smp_mb__after_clear_bit(); \
        } while (0)

#define rpc_start_wakeup(t) \
        (test_and_set_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate) == 0)
#define rpc_finish_wakeup(t) \
        do { \
                smp_mb__before_clear_bit(); \
                clear_bit(RPC_TASK_WAKEUP, &(t)->tk_runstate); \
                smp_mb__after_clear_bit(); \
        } while (0)

#define RPC_IS_ACTIVATED(t)     test_bit(RPC_TASK_ACTIVE, &(t)->tk_runstate)

/*
 * Task priorities.
 * Note: if you change these, you must also change
 * the task initialization definitions below.
 */
#define RPC_PRIORITY_LOW        0
#define RPC_PRIORITY_NORMAL     1
#define RPC_PRIORITY_HIGH       2
#define RPC_NR_PRIORITY         (RPC_PRIORITY_HIGH+1)

/*
 * RPC synchronization objects
 */
struct rpc_wait_queue {
        spinlock_t              lock;
        struct list_head        tasks[RPC_NR_PRIORITY]; /* task queue for each priority level */
        unsigned long           cookie;         /* cookie of last task serviced */
        unsigned char           maxpriority;    /* maximum priority (0 if queue is not a priority queue) */
        unsigned char           priority;       /* current priority */
        unsigned char           count;          /* # task groups left to service at this priority */
        unsigned char           nr;             /* # tasks remaining for cookie */
        unsigned short          qlen;           /* total # tasks waiting in queue */
#ifdef RPC_DEBUG
        const char *            name;
#endif
};

/*
 * This is the # requests to send consecutively
 * from a single cookie. The aim is to improve
 * performance of NFS operations such as read/write.
 */
#define RPC_BATCH_COUNT         16

#ifndef RPC_DEBUG
# define RPC_WAITQ_INIT(var,qname) { \
                .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
                .tasks = { \
                        [0] = LIST_HEAD_INIT(var.tasks[0]), \
                        [1] = LIST_HEAD_INIT(var.tasks[1]), \
                        [2] = LIST_HEAD_INIT(var.tasks[2]), \
                }, \
        }
#else
# define RPC_WAITQ_INIT(var,qname) { \
                .lock = __SPIN_LOCK_UNLOCKED(var.lock), \
                .tasks = { \
                        [0] = LIST_HEAD_INIT(var.tasks[0]), \
                        [1] = LIST_HEAD_INIT(var.tasks[1]), \
                        [2] = LIST_HEAD_INIT(var.tasks[2]), \
                }, \
                .name = qname, \
        }
#endif

# define RPC_WAITQ(var,qname)   struct rpc_wait_queue var = RPC_WAITQ_INIT(var,qname)

#define RPC_IS_PRIORITY(q)      ((q)->maxpriority > 0)
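
/*
 * Illustrative sketch only (not part of this header): a wait queue can be
 * declared statically with RPC_WAITQ() or initialized at run time with
 * rpc_init_wait_queue() / rpc_init_priority_wait_queue() (prototyped
 * below).  The names here are hypothetical.
 */
#if 0
static RPC_WAITQ(example_waitq, "example");

static void example_setup_queue(struct rpc_wait_queue *q)
{
        rpc_init_wait_queue(q, "example-dynamic");
}
#endif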

/*
 * Function prototypes
 */
struct rpc_task *rpc_new_task(struct rpc_clnt *, int flags,
                                const struct rpc_call_ops *ops, void *data);
struct rpc_task *rpc_run_task(struct rpc_clnt *clnt, int flags,
                                const struct rpc_call_ops *ops, void *data);
void            rpc_init_task(struct rpc_task *task, struct rpc_clnt *clnt,
                                int flags, const struct rpc_call_ops *ops,
                                void *data);
void            rpc_put_task(struct rpc_task *);
void            rpc_exit_task(struct rpc_task *);
void            rpc_release_calldata(const struct rpc_call_ops *, void *);
void            rpc_killall_tasks(struct rpc_clnt *);
void            rpc_execute(struct rpc_task *);
void            rpc_init_priority_wait_queue(struct rpc_wait_queue *, const char *);
void            rpc_init_wait_queue(struct rpc_wait_queue *, const char *);
void            rpc_sleep_on(struct rpc_wait_queue *, struct rpc_task *,
                                rpc_action action, rpc_action timer);
void            rpc_wake_up_task(struct rpc_task *);
void            rpc_wake_up(struct rpc_wait_queue *);
struct rpc_task *rpc_wake_up_next(struct rpc_wait_queue *);
void            rpc_wake_up_status(struct rpc_wait_queue *, int);
void            rpc_delay(struct rpc_task *, unsigned long);
void *          rpc_malloc(struct rpc_task *, size_t);
void            rpc_free(struct rpc_task *);
int             rpciod_up(void);
void            rpciod_down(void);
int             __rpc_wait_for_completion_task(struct rpc_task *task, int (*)(void *));
#ifdef RPC_DEBUG
void            rpc_show_tasks(void);
#endif
int             rpc_init_mempool(void);
void            rpc_destroy_mempool(void);
extern struct workqueue_struct *rpciod_workqueue;

static inline void rpc_exit(struct rpc_task *task, int status)
{
        task->tk_status = status;
        task->tk_action = rpc_exit_task;
}

static inline int rpc_wait_for_completion_task(struct rpc_task *task)
{
        return __rpc_wait_for_completion_task(task, NULL);
}
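
/*
 * Illustrative sketch only (not part of this header): start an async task
 * and wait for it, combining rpc_run_task(), rpc_wait_for_completion_task()
 * and rpc_put_task().  It reuses the hypothetical example_call_ops table
 * sketched earlier; the error handling is a plausible pattern, not a contract.
 */
#if 0
static int example_run_and_wait(struct rpc_clnt *clnt, void *calldata)
{
        struct rpc_task *task;
        int status;

        task = rpc_run_task(clnt, RPC_TASK_ASYNC, &example_call_ops, calldata);
        if (IS_ERR(task))
                return PTR_ERR(task);
        status = rpc_wait_for_completion_task(task);
        if (status == 0)
                status = task->tk_status;
        rpc_put_task(task);
        return status;
}
#endif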

#ifdef RPC_DEBUG
static inline const char * rpc_qname(struct rpc_wait_queue *q)
{
        return ((q && q->name) ? q->name : "unknown");
}
#endif

#endif /* _LINUX_SUNRPC_SCHED_H_ */