1 Concurrent Programming

2 Outline
Concurrency with I/O multiplexing
Synchronization
– Shared variables
– Synchronizing with semaphores
Suggested reading: 12.2, 12.4, 12.5.1-3

3 I/O Multiplexing Interfaces

#include <sys/select.h>

int select(int nfds, fd_set *readfds, fd_set *writefds,
           fd_set *exceptfds, struct timeval *timeout);
Returns: nonzero count of ready descriptors, -1 on error

Macros for manipulating descriptor sets:
FD_ZERO(fd_set *fdset);          /* Clear all bits in fdset */
FD_CLR(int fd, fd_set *fdset);   /* Clear bit fd in fdset */
FD_SET(int fd, fd_set *fdset);   /* Turn on bit fd in fdset */
FD_ISSET(int fd, fd_set *fdset); /* Is bit fd in fdset on? */

4 select() function
select()
– Sleeps until one or more file descriptors in the set readset are ready for reading
– Returns the number of ready descriptors and sets each bit of readset to indicate the ready status of its corresponding descriptor

5 select() function
readset
– Bit vector (max FD_SETSIZE bits) that indicates membership in a descriptor set
– If bit k is 1, then descriptor k is a member of the descriptor set
maxfd
– Cardinality of the readset
– select() tests descriptors 0, 1, 2, ..., maxfd-1 for set membership
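To make the calling convention concrete, here is a minimal sketch, not taken from the original slides, that waits for stdin to become readable; the five-second timeout is an illustrative assumption.

#include <sys/select.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    fd_set read_set;
    struct timeval timeout = {5, 0};    /* 5 seconds, 0 microseconds (illustrative) */

    FD_ZERO(&read_set);                 /* Clear all bits */
    FD_SET(STDIN_FILENO, &read_set);    /* Add descriptor 0 (stdin) */

    /* Largest descriptor in the set is STDIN_FILENO, so nfds = STDIN_FILENO + 1 */
    int nready = select(STDIN_FILENO + 1, &read_set, NULL, NULL, &timeout);

    if (nready < 0)
        perror("select");
    else if (nready == 0)
        printf("Timed out: stdin not ready\n");
    else if (FD_ISSET(STDIN_FILENO, &read_set))
        printf("stdin is ready for reading\n");
    return 0;
}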

6 Working Model of select
select() monitors a set of descriptors recorded in a data structure
– Descriptor set: fd_set

FD_ZERO(&read_set);
FD_SET(3, &read_set);
FD_SET(STDIN_FILENO, &read_set);
FD_CLR(3, &read_set);

7 Working Model of select
[Figure: a clientfd array whose slots 0-9 hold connected descriptors (10, 7, 4, 12, 5); slots are marked Active, Inactive, or Never Used, with listenfd = 3. After select() returns, only the active descriptors with pending input (e.g., descriptor 7) need to be read.]

8 Concurrent Programming with I/O Multiplexing

FD_ZERO(&read_set);
FD_SET(STDIN_FILENO, &read_set);
FD_SET(listenfd, &read_set);

while (1) {
    ready_set = read_set;
    Select(listenfd+1, &ready_set, NULL, NULL, NULL);
    if (FD_ISSET(STDIN_FILENO, &ready_set))
        command();    /* Read command line from stdin */
    if (FD_ISSET(listenfd, &ready_set)) {
        connfd = Accept(listenfd, (SA *)&clientaddr, &clientlen);
        echo(connfd);
    }
}

9 Concurrent Programming with I/O Multiplexing

void command(void)
{
    char buf[MAXLINE];
    if (!Fgets(buf, MAXLINE, stdin))
        exit(0);    /* EOF */
    /* Process the input command */
    printf("%s", buf);
}

10 Concurrent Event-Driven Programming Model
The server is structured as a state machine with
– States
– Input events
– Transitions
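As an illustration only (not from the original slides), here is a minimal, hypothetical sketch of one connected client viewed as a state machine: the input event is "descriptor ready for reading", the transition handler echoes what was read, and EOF moves the machine to a closed state. The names client_sm, client_state, and on_ready are invented for this example.

#include <unistd.h>

typedef enum { WAITING_FOR_DATA, CLOSED } client_state;   /* states */

typedef struct {
    int fd;                /* connected descriptor */
    client_state state;
} client_sm;

/* Transition function, invoked when select() reports fd ready (the input event) */
void on_ready(client_sm *c)
{
    char buf[8192];
    ssize_t n = read(c->fd, buf, sizeof(buf));
    if (n > 0) {
        write(c->fd, buf, n);    /* echo; remain in WAITING_FOR_DATA */
    } else {
        close(c->fd);            /* EOF or error: transition to CLOSED */
        c->state = CLOSED;
    }
}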

11 Concurrent Echo Server with I/O Multiplexing

#include "csapp.h"

typedef struct { /* Represents a pool of connected descriptors */
    int maxfd;                   /* Largest descriptor in read_set */
    fd_set read_set;             /* Set of all active descriptors */
    fd_set ready_set;            /* Subset of descriptors ready for reading */
    int nready;                  /* Number of ready descriptors from select */
    int maxi;                    /* High-water index into client array */
    int clientfd[FD_SETSIZE];    /* Set of active descriptors */
    rio_t clientrio[FD_SETSIZE]; /* Set of active read buffers */
} pool;

int byte_cnt = 0; /* Counts total bytes received by server */

12 Concurrent Echo Server with I/O Multiplexing

int main(int argc, char **argv)
{
    int listenfd, connfd, port;
    socklen_t clientlen = sizeof(struct sockaddr_in);
    struct sockaddr_in clientaddr;
    static pool pool;

    if (argc != 2) {
        fprintf(stderr, "usage: %s <port>\n", argv[0]);
        exit(0);
    }
    port = atoi(argv[1]);

    listenfd = Open_listenfd(port);
    init_pool(listenfd, &pool);

13 Concurrent Echo Server with I/O Multiplexing

    while (1) {
        /* Wait for listening/connected descriptor(s) to become ready */
        pool.ready_set = pool.read_set;
        pool.nready = Select(pool.maxfd+1, &pool.ready_set, NULL, NULL, NULL);

        /* If listening descriptor ready, add new client to pool */
        if (FD_ISSET(listenfd, &pool.ready_set)) {
            connfd = Accept(listenfd, (SA *)&clientaddr, &clientlen);
            add_client(connfd, &pool);
        }

        /* Echo a text line from each ready connected descriptor */
        check_clients(&pool);
    }
}

14 Concurrent Echo Server with I/O Multiplexing

void init_pool(int listenfd, pool *p)
{
    /* Initially, there are no connected descriptors */
    int i;
    p->maxi = -1;
    for (i = 0; i < FD_SETSIZE; i++)
        p->clientfd[i] = -1;

    /* Initially, listenfd is the only member of the select read set */
    p->maxfd = listenfd;
    FD_ZERO(&p->read_set);
    FD_SET(listenfd, &p->read_set);
}

15 Concurrent Echo Server with I/O Multiplexing

void add_client(int connfd, pool *p)
{
    int i;
    p->nready--;
    for (i = 0; i < FD_SETSIZE; i++)   /* Find an available slot */
        if (p->clientfd[i] < 0) {
            /* Add connected descriptor to the pool */
            p->clientfd[i] = connfd;
            Rio_readinitb(&p->clientrio[i], connfd);

            /* Add the descriptor to the descriptor set */
            FD_SET(connfd, &p->read_set);

            /* Update max descriptor and pool high-water mark */
            if (connfd > p->maxfd)
                p->maxfd = connfd;
            if (i > p->maxi)
                p->maxi = i;
            break;
        }
    if (i == FD_SETSIZE)   /* Couldn't find an empty slot */
        app_error("add_client error: Too many clients");
}

16 Concurrent Echo Server with I/O Multiplexing

void check_clients(pool *p)
{
    int i, connfd, n;
    char buf[MAXLINE];
    rio_t rio;

    for (i = 0; (i <= p->maxi) && (p->nready > 0); i++) {
        connfd = p->clientfd[i];
        rio = p->clientrio[i];

        /* If the descriptor is ready, echo a text line from it */
        if ((connfd > 0) && (FD_ISSET(connfd, &p->ready_set))) {
            p->nready--;
            if ((n = Rio_readlineb(&rio, buf, MAXLINE)) != 0) {
                byte_cnt += n;
                printf("Server received %d (%d total) bytes on fd %d\n",
                       n, byte_cnt, connfd);
                Rio_writen(connfd, buf, n);
            }
            else {   /* EOF detected, remove descriptor from pool */
                Close(connfd);
                FD_CLR(connfd, &p->read_set);
                p->clientfd[i] = -1;
            }
        }
    }
}

17 Pros and Cons of I/O Multiplexing
+ One logical control flow.
+ Can single-step with a debugger.
+ No process or thread control overhead.
+ Design of choice for high-performance Web servers and search engines.
– Significantly more complex to code than process- or thread-based designs.
– Hard to provide fine-grained concurrency. E.g., our example will hang up with partial lines.
– Cannot take advantage of multiple cores: single thread of control.

18 Approaches to Concurrency
Processes
– Hard to share resources: easy to avoid unintended sharing
– High overhead in adding/removing clients
Threads
– Easy to share resources: perhaps too easy
– Medium overhead
– Not much control over scheduling policies
– Difficult to debug: event orderings not repeatable
I/O Multiplexing
– Tedious and low level
– Total control over scheduling
– Very low overhead
– Cannot create as fine-grained a level of concurrency
– Does not make use of multiple cores

19 Potential Form of Unintended Sharing

Main thread:
while (1) {
    int connfd = Accept(listenfd, (SA *) &clientaddr, &clientlen);
    Pthread_create(&tid, NULL, echo_thread, (void *) &connfd);
}

[Figure: the main thread's stack holds connfd, and vargp in each peer thread's stack points to that same location. Timeline: the main thread stores connfd 1, peer 1 runs connfd = *vargp, the main thread stores connfd 2, peer 2 runs connfd = *vargp. If the main thread overwrites connfd before a peer dereferences vargp, the peer reads the wrong descriptor. Race!]

Why would both copies of vargp point to the same location?
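Because the main thread passes &connfd, every peer receives the address of the same variable on the main thread's stack. As a hedged sketch, not part of the original slides and assuming the same csapp.h context as the fragment above (listenfd, clientaddr, clientlen, tid, echo), the usual remedy is to give each connection its own heap-allocated descriptor:

/* Main thread: one heap-allocated connfd per connection */
while (1) {
    int *connfdp = Malloc(sizeof(int));    /* private copy for this connection */
    *connfdp = Accept(listenfd, (SA *) &clientaddr, &clientlen);
    Pthread_create(&tid, NULL, echo_thread, connfdp);
}

/* Peer thread: copy the descriptor, then free the heap slot */
void *echo_thread(void *vargp)
{
    int connfd = *((int *) vargp);
    Pthread_detach(pthread_self());
    Free(vargp);
    echo(connfd);
    Close(connfd);
    return NULL;
}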

20 Shared variables in threaded C programs
Which variables in a C program are shared and which are not?
– What is the memory model for threads?
– How are instances of a variable mapped to memory?
– How many threads reference each of these instances?

21 Threads memory model
Conceptual model:
– Each thread runs in the context of a process.
– Each thread has its own separate thread context:
  thread ID, stack, stack pointer, program counter, condition codes, and general-purpose registers.
– All threads share the remaining process context:
  code, data, heap, and shared library segments of the process virtual address space; open files and installed handlers.

22 Threads memory model
Operationally, this model is not strictly enforced:
– While register values are truly separate and protected...
– Any thread can read and write the stack of any other thread.
The mismatch between the conceptual model and the operational model is a source of confusion and errors.
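As an illustration only (this program is not in the original slides), the following sketch shows the operational reality: the main thread passes the address of a variable on its own stack to a peer thread, and the peer writes through that pointer, modifying the main thread's stack.

#include <stdio.h>
#include <pthread.h>

void *poke(void *vargp)
{
    int *p = (int *) vargp;   /* points into the main thread's stack */
    *p = 42;                  /* one thread writing another thread's stack */
    return NULL;
}

int main(void)
{
    pthread_t tid;
    int x = 0;                /* lives on the main thread's stack */

    pthread_create(&tid, NULL, poke, &x);
    pthread_join(tid, NULL);
    printf("x = %d\n", x);    /* prints x = 42 */
    return 0;
}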

23 Shared variable analysis

#include "csapp.h"
#define N 2
void *thread(void *vargp);

char **ptr; /* global variable */

24 Shared variable analysis

int main()
{
    int i;
    pthread_t tid;
    char *msgs[N] = {
        "Hello from foo",
        "Hello from bar"
    };

    ptr = msgs;
    for (i = 0; i < N; i++)
        Pthread_create(&tid, NULL, thread, (void *)i);
    Pthread_exit(NULL);
}

25 Shared variable analysis

void *thread(void *vargp)
{
    int myid = (int)vargp;
    static int cnt = 0;

    printf("[%d]: %s (cnt=%d)\n", myid, ptr[myid], ++cnt);
}

26 Mapping Variable Instances to Memory
Global variables
– Def: variable declared outside of a function
– Virtual memory contains exactly one instance of any global variable
Local variables
– Def: variable declared inside a function without the static attribute
– Each thread stack contains one instance of each local variable
Local static variables
– Def: variable declared inside a function with the static attribute
– Virtual memory contains exactly one instance of any local static variable

27 Shared variable analysis
Which variables are shared?

Variable     Referenced by    Referenced by     Referenced by
instance     main thread?     peer thread 0?    peer thread 1?
ptr          yes              yes               yes
cnt          no               yes               yes
i.m          yes              no                no
msgs.m       yes              yes               yes
myid.p0      no               yes               no
myid.p1      no               no                yes

28 Shared variable analysis
Answer: a variable x is shared iff multiple threads reference at least one instance of x.
Thus:
– ptr, cnt, and msgs are shared.
– i and myid are NOT shared.

29 Shared variable analysis

#include "csapp.h"

#define NITERS 100000000
void *count(void *arg);

/* shared variable */
unsigned int cnt = 0;

int main()
{
    pthread_t tid1, tid2;

    Pthread_create(&tid1, NULL, count, NULL);
    Pthread_create(&tid2, NULL, count, NULL);
    Pthread_join(tid1, NULL);
    Pthread_join(tid2, NULL);

    if (cnt != (unsigned)NITERS*2)
        printf("BOOM! cnt=%d\n", cnt);
    else
        printf("OK cnt=%d\n", cnt);
    exit(0);
}

30 Shared variable analysis

/* thread routine */
void *count(void *arg)
{
    int i;
    for (i = 0; i < NITERS; i++)
        cnt++;
    return NULL;
}

31 Shared variable analysis
cnt should equal 200,000,000. What went wrong?!

linux> badcnt
BOOM! cnt=198841183
linux> badcnt
BOOM! cnt=198261801
linux> badcnt
BOOM! cnt=198269672

32 Assembly code for counter loop

C code for thread i:
    for (i = 0; i < NITERS; i++)
        cnt++;

33 Assembly code for counter loop

Asm code for thread i:

.L9:                          # Head (H_i)
    movl -4(%ebp),%eax        #   i is at -4(%ebp)
    cmpl $99999999,%eax
    jle .L12
    jmp .L10
.L12:
    movl cnt,%eax             # Load cnt   (L_i)
    leal 1(%eax),%edx         # Update cnt (U_i)
    movl %edx,cnt             # Store cnt  (S_i)
.L11:                         # Tail (T_i)
    movl -4(%ebp),%eax
    leal 1(%eax),%edx
    movl %edx,-4(%ebp)
    jmp .L9
.L10:

34 Concurrent execution
Key idea: In general, any sequentially consistent interleaving is possible, but some are incorrect!
– I_i denotes that thread i executes instruction I
– %eax_i is the contents of %eax in thread i's context

35 Concurrent execution
Correct ordering: cnt ends at 2. OK.

i (thread)   instr_i   %eax_1   %eax_2   cnt
1            H1        -        -        0
1            L1        0        -        0
1            U1        1        -        0
1            S1        1        -        1
2            H2        -        -        1
2            L2        -        1        1
2            U2        -        2        1
2            S2        -        2        2
2            T2        -        2        2
1            T1        1        -        2

36 Concurrent execution (cont)
Incorrect ordering: two threads increment the counter, but the result is 1 instead of 2.

i (thread)   instr_i   %eax_1   %eax_2   cnt
1            H1        -        -        0
1            L1        0        -        0
1            U1        1        -        0
2            H2        -        -        0
2            L2        -        0        0
1            S1        1        -        1
1            T1        1        -        1
2            U2        -        1        1
2            S2        -        1        1
2            T2        -        1        1

37 Progress graphs
A progress graph depicts the discrete execution state space of concurrent threads.
Each axis corresponds to the sequential order of instructions in a thread.
Each point corresponds to a possible execution state (Inst_1, Inst_2). E.g., (L1, S2) denotes the state where thread 1 has completed L1 and thread 2 has completed S2.
[Figure: a grid whose horizontal axis lists thread 1's instructions H1, L1, U1, S1, T1 and whose vertical axis lists thread 2's instructions H2, L2, U2, S2, T2, with the point (L1, S2) marked.]

38 Trajectories in progress graphs
A trajectory is a sequence of legal state transitions that describes one possible concurrent execution of the threads.
Example: H1, L1, U1, H2, L2, S1, T1, U2, S2, T2
[Figure: the progress graph with this trajectory drawn as a path through the grid of thread 1 and thread 2 instructions.]

39 Critical sections and unsafe regions
L, U, and S form a critical section with respect to the shared variable cnt.
Instructions in critical sections (which write to some shared variable) should not be interleaved.
Sets of states where such interleaving occurs form unsafe regions.
[Figure: the progress graph with each thread's critical section wrt cnt marked on its axis and the resulting rectangular unsafe region shaded.]

40 Safe and unsafe trajectories
Def: A trajectory is safe iff it does not touch any part of an unsafe region.
Claim: A trajectory is correct (wrt cnt) iff it is safe.
[Figure: the progress graph showing the unsafe region, an unsafe trajectory that passes through it, and a safe trajectory that skirts around it; each thread's critical section wrt cnt is marked on its axis.]

41 Synchronizing with semaphores
Dijkstra's P and V operations on semaphores
– Semaphore: non-negative integer synchronization variable.
– P(s): [ while (s == 0) wait(); s--; ]
  Dutch for "Proberen" (to test)
– V(s): [ s++; ]
  Dutch for "Verhogen" (to increment)

42 Synchronizing with semaphores
Dijkstra's P and V operations on semaphores
– The OS guarantees that the operations between brackets [ ] are executed indivisibly.
  Only one P or V operation at a time can modify s.
  When the while loop in P terminates, only that P can decrement s.
– Semaphore invariant: (s >= 0)
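To make the indivisibility requirement concrete, here is a hedged sketch (not from the original slides) of a counting semaphore built from a pthread mutex and condition variable; the type name csem and the functions csem_init, csem_P, and csem_V are invented for this example. The mutex makes the test-and-decrement in P and the increment in V indivisible, so the invariant s >= 0 always holds.

#include <pthread.h>

typedef struct {
    int s;                    /* non-negative semaphore value */
    pthread_mutex_t lock;
    pthread_cond_t cond;
} csem;

void csem_init(csem *sem, int value)
{
    sem->s = value;
    pthread_mutex_init(&sem->lock, NULL);
    pthread_cond_init(&sem->cond, NULL);
}

void csem_P(csem *sem)        /* P(s): wait until s > 0, then s-- */
{
    pthread_mutex_lock(&sem->lock);
    while (sem->s == 0)
        pthread_cond_wait(&sem->cond, &sem->lock);
    sem->s--;
    pthread_mutex_unlock(&sem->lock);
}

void csem_V(csem *sem)        /* V(s): s++ and wake one waiter */
{
    pthread_mutex_lock(&sem->lock);
    sem->s++;
    pthread_cond_signal(&sem->cond);
    pthread_mutex_unlock(&sem->lock);
}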

43 POSIX semaphores

#include <semaphore.h>

int sem_init(sem_t *sem, int pshared, unsigned int value);
int sem_wait(sem_t *s);   /* P(s) */
int sem_post(sem_t *s);   /* V(s) */

#include "csapp.h"

void P(sem_t *s);   /* Wrapper function for sem_wait */
void V(sem_t *s);   /* Wrapper function for sem_post */

44 Sharing with POSIX semaphores

#include "csapp.h"
#define NITERS 10000000

unsigned int cnt;   /* counter */
sem_t sem;          /* semaphore */

int main()
{
    pthread_t tid1, tid2;

    Sem_init(&sem, 0, 1);

    /* create 2 threads and wait */
    ...

    if (cnt != (unsigned)NITERS*2)
        printf("BOOM! cnt=%d\n", cnt);
    else
        printf("OK cnt=%d\n", cnt);
    exit(0);
}

45 Sharing with POSIX semaphores

/* thread routine */
void *count(void *arg)
{
    int i;
    for (i = 0; i < NITERS; i++) {
        P(&sem);
        cnt++;
        V(&sem);
    }
    return NULL;
}

46 Safe sharing with semaphores
Provide mutually exclusive access to the shared variable by surrounding the critical section with P and V operations on a semaphore s (initially set to 1).
The semaphore invariant creates a forbidden region that encloses the unsafe region and is never touched by any trajectory.

47 Next
Semaphores for shared resources
– Producer-consumer problem
– Readers-writers problem
Example
– Concurrent server based on prethreading
Concurrency issues
– Thread safety, reentrancy, races, and deadlock
Suggested reading: 12.5.4-5, 12.7

