-
Notifications
You must be signed in to change notification settings - Fork 8
/
se350.tex
917 lines (859 loc) · 31.7 KB
/
se350.tex
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
\documentclass[]{article}
\usepackage{etex}
\usepackage[margin = 1.5in]{geometry}
\setlength{\parindent}{0in}
\usepackage{amsmath}
\usepackage{amsfonts}
\usepackage{amssymb}
\usepackage{amsthm}
\usepackage{listings}
\usepackage{color}
\usepackage{mathtools}
\usepackage{multicol}
\usepackage{pgfplots}
\usepackage{qtree}
\usepackage{xytree}
\usepackage[lined]{algorithm2e}
\usepackage{float}
\usepackage[T1]{fontenc}
\usepackage{ae,aecompl}
\usepackage[pdftex,
pdfauthor={Michael Noukhovitch},
pdftitle={SE 350: Operating Systems},
pdfsubject={Lecture notes from SE 350 at the University of Waterloo},
pdfproducer={LaTeX},
pdfcreator={pdflatex}]{hyperref}
\usepackage{cleveref}
\usepackage{enumitem}
\definecolor{dkgreen}{rgb}{0,0.6,0}
\definecolor{gray}{rgb}{0.5,0.5,0.5}
\definecolor{mauve}{rgb}{0.58,0,0.82}
\lstset{
language=C,
aboveskip=3mm,
belowskip=3mm,
showstringspaces=false,
columns=flexible,
basicstyle={\small\ttfamily},
numbers=none,
numberstyle=\tiny\color{gray},
keywordstyle=\color{blue},
commentstyle=\color{dkgreen},
stringstyle=\color{mauve},
breaklines=true,
breakatwhitespace=true,
tabsize=4
}
\theoremstyle{definition}
\newtheorem*{defn}{Definition}
\newtheorem{ex}{Example}[section]
\newtheorem*{theorem}{Theorem}
\setlength{\marginparwidth}{1.5in}
\setlength{\algomargin}{0.75em}
\DeclarePairedDelimiter{\set}{\lbrace}{\rbrace}
\definecolor{darkish-blue}{RGB}{25,103,185}
% hyperref is already loaded above (with PDF metadata options); loading it a
% second time is redundant and would error if the options differed, so only
% configure link colors here.
\hypersetup{
colorlinks,
citecolor=darkish-blue,
filecolor=darkish-blue,
linkcolor=darkish-blue,
urlcolor=darkish-blue
}
\newcommand{\lecture}[1]{\marginpar{{\footnotesize $\leftarrow$ \underline{#1}}}}
\makeatletter
\def\blfootnote{\gdef\@thefnmark{}\@footnotetext}
\makeatother
\begin{document}
\let\ref\Cref
\title{\textbf{SE 350: Operating Systems}}
\date{Winter 2015, University of Waterloo \\ Notes written from Thomas Reidemeister's lectures.}
\author{Michael Noukhovitch}
\maketitle
\newpage
\tableofcontents
\newpage
\section{Introduction}
\subsection{Definitions}
\textbf{Operating Systems}: a standardized abstraction from hardware that:
\begin{itemize}
\item manages resources
\item provides set of services
\item consumes resources
\end{itemize}
\textbf{Instruction execution}:
\begin{enumerate}
\item fetches instruction into IR
\item executes instruction
\end{enumerate}
In reality, it is a bit more complicated: there is a pipeline, out-of-order execution, \ldots
\subsection{Interrupts}
\subsubsection{Type of interrupts}
\begin{itemize}
\item \textbf{program} result of instruction execution e.g. arithmetic overflow
\item \textbf{timer} timer within process, allows OS to perform regular functions
\item \textbf{I/O} generated by I/O controller
\item \textbf{hardware failure} power failure or memory parity failure
\end{itemize}
\subsubsection{How interrupts work}
Hardware:
\begin{enumerate}
\item interrupt issued
\item processor finishes current instruction
\item acknowledge interrupt
\item push PSW and PC onto control stack
\item load new PC
\end{enumerate}
Software:
\begin{enumerate}[resume]
\item save remainder of process state
\item interrupt
\item restore process state information
\item restore PSW and PC
\end{enumerate}
\subsection{Multiple interrupts}
Two ways of handling an interrupt during an interrupt:
\subsubsection{Sequential}
Ignore any interrupts when you are in an interrupt and when done, check for interrupts that occurred.
\subsubsection{Nested}
If the second interrupt is of higher priority, recurse into it. Otherwise, wait until interrupt is finished.
\subsection{Peripheral interrupts}
Peripherals such as hard drives take a while to complete their action. As opposed to waiting and wasting that time, we use an interrupt to execute other instructions while our I/O process runs.
\subsubsection{Programmed I/O}
No interrupts occur, just wait until the I/O is complete
\subsubsection{Interrupt-driven I/O}
Processor interrupted when I/O is ready and all writes/reads are passed through CPU into memory. This is faster, since there is no waiting.
\subsubsection{Direct memory access}
Transfers a block of data directly into memory and interrupt is sent when process is complete. This is more efficient because data does not need to go through CPU. Not always available (e.g. external peripheral)
\subsection{Memory hierarchy}
Major constraints in memory:
\begin{itemize}
\item size
\item speed
\item cost
\end{itemize}
\subsubsection{Hierarchy}
Top: Inboard memory \\
Middle: Outboard storage \\
Bottom: Offline storage \\
\subsubsection{Cache}
\textbf{Cache}: small, fast memory, invisible to the OS, that speeds up accesses exploiting the principle of locality
\section{Operating Systems Overview}
\subsection{Definition}
\textbf{Operating System} a program that controls the execution of applications and is a standard interface between hardware and software
\begin{itemize}
\item \textbf{convenience}: need no knowledge of hardware
\item \textbf{efficiency}: move optimization from devs to tools
\item \textbf{ability to evolve}: can replace internals
\end{itemize}
\textbf{Kernel}: portion of the OS in main memory; a nucleus that contains frequently used functions
\textbf{OS services}
\begin{itemize}
\item program development and execution
\item access \& control of access to I/O
\item system access control
\item error detection and response
\item accounting
\end{itemize}
\subsection{OS Innovations}
\subsubsection{Hardware Features}
\begin{itemize}
\item \textbf{Memory protection}: do not allow memory containing monitor to be altered
\item \textbf{Timer}: prevents a job from monopolizing system
\item \textbf{Privileged instruction}: certain instructions can only be executed by the monitor (e.g. I/O)
\item \textbf{Interrupts}
\end{itemize}
\subsubsection{Modes of operation}
To protect users from each other (and the kernel from user), we have two modes:
\begin{itemize}
\item \textbf{User mode}: not privileged
\item \textbf{Kernel mode}: privileged and access to protected memory
\end{itemize}
\subsubsection{Multiprogramming}
When one job needs to wait for I/O, the processor can switch to another job. The timer is also used to switch processes and stop monopolization.
\begin{itemize}
\item maximize processor use
\item use job control language
\end{itemize}
\subsubsection{Time Sharing}
Processor time is shared by multiple users
\begin{itemize}
\item minimize response time
\end{itemize}
\subsection{Major Achievements}
\subsubsection{Processes}
\textbf{Process}: a program in execution
\begin{itemize}
\item a program
\item associated data
\item execution content (needed by OS)
\end{itemize}
\subsubsection{Memory Management}
\begin{itemize}
\item process isolation
\item automatic allocation
\begin{itemize}
\item virtual memory: allows programmer to address memory without regard to physical addressing
\item paging: allows processes to be comprised of fixed-size blocks (pages)
\end{itemize}
\item swap program code
\item shared memory
\item long term storage
\end{itemize}
\subsubsection{Information Security}
\begin{itemize}
\item availability: protecting system against interruption (downtime)
\item confidentiality: authorizing data (chmod)
\item data integrity: protect from modification
\item authenticity: verifying identity of users
\end{itemize}
\subsubsection{Scheduling and Resource Management}
\begin{itemize}
\item fairness: give equal access to resources
\item differential responsiveness: discriminate by class of jobs
\item efficiency: maximize throughput
\end{itemize}
\subsubsection{System Structure}
The system as a hierarchical structure with each level relying on lower levels for its functions.
\begin{enumerate}
\item circuits: registers, gates, buffers
\item instructions: add, subtract, load, store
\item procedures: call stack, subroutine
\item interrupts
\item processes: suspend, wait, resume
\item local store: blocks of data, allocate
\item virtual memory: segments, pages
\item communications: pipes
\item file system: files
\item external devices
\item directories
\item user process
\item shell
\end{enumerate}
\subsection{Modern Operating System}
Developments leading to modern operating systems:
\begin{itemize}
\item \textbf{Microkernel architecture}: only essentials to kernel, everything else in user space (e.g. QNX)
\item \textbf{Multithreading}: process divided into concurrent threads
\item \textbf{Symmetric multiprocessing}: multiple processors share main memory and I/O
\item \textbf{Distributed OS}: illusion of a single main and secondary memory
\item \textbf{Asymmetric multiprocessing}: one big processor controls many small ones
\item \textbf{Object oriented design}: customize OS without disrupting system
\end{itemize}
\section{Processes}
\subsection{Process Elements}
\textbf{Process Control Block (PCB)}: data structure that contains process elements
\begin{itemize}
\item \textbf{identifier}
\item \textbf{state}
\item \textbf{priority}
\item \textbf{memory pointers}
\item \textbf{context data}: registers, PSW, PC
\item \textbf{I/O status information}
\item \textbf{accounting information}: processor time, time limits, threads
\end{itemize}
\subsection{Process Model}
\subsubsection{Five State Model}
\begin{itemize}
\item \textbf{running}: currently executing
\item \textbf{ready}: can be executed
\item \textbf{waiting}: can't execute, blocked by something
\item \textbf{new}
\item \textbf{exit}: halted or aborted
\end{itemize}
Since the processor is faster than I/O, we may sometimes want to admit more processes even after we are out of memory. To do this we \textbf{swap} out processes to disk that are waiting and we get two new states:
\begin{itemize}
\item \textbf{blocked/suspend}
\item \textbf{ready/suspend}
\end{itemize}
\subsubsection{Process Creation}
\begin{itemize}
\item new batch job
\item interactive logon
\item created by OS for a service
\item created by existing process
\end{itemize}
\subsubsection{Process Termination}
\begin{multicols}{2}
\begin{itemize}
\item normal completion
\item time limit exceeded
\item time overrun
\item memory unavailable
\item bounds error (segfault)
\item protection error
\item arithmetic error
\item I/O error
\item invalid instruction
\item privileged instruction
\item data misuse
\item OS intervention
\item parent termination
\item parent request
\end{itemize}
\end{multicols}
\subsubsection{Process Suspension}
\begin{itemize}
\item swapping
\item other OS reason
\item user request
\item timing
\item parent request
\end{itemize}
\subsubsection{Process Switching}
\begin{description}
\item[clock interrupt]: maximum allowed time has been surpassed
\item[I/O interrupt]: I/O completed
\item[memory fault]: memory address is in virtual memory, bring into main memory
\item[trap]: error or exception, used for debugging
\item[supervisor call]: switch to kernel process
\end{description}
\subsection{OS Control Structures}
\subsubsection{Memory Tables}
\begin{itemize}
\item keeps track of main and secondary memory
\item protection for access to shared memory
\item information to manage virtual memory
\end{itemize}
\subsubsection{I/O Tables}
\begin{itemize}
\item manages I/O devices (status and availability)
\item location in main memory for transferring I/O
\end{itemize}
\subsubsection{File Tables}
\begin{itemize}
\item manages existence and location of files
\item attributes and status of files (e.g. rwxr...)
\end{itemize}
\subsubsection{Process Table}
\begin{itemize}
\item where process is located in memory as a \textbf{process image}:
\begin{itemize}
\item program
\item data
\item system stack
\item PCB
\end{itemize}
\end{itemize}
\section{Threads, SMP, Microkernels}
\subsection{Threads}
\textbf{Thread}: execution entity under a process
\textbf{Multithreading}: multiple threads of execution within a single process
\subsubsection{Benefits}
\begin{itemize}
\item less time to create/terminate (no kernel resource allocation)
\item less time to switch between threads v. processes
\item sharing memory between threads
\end{itemize}
\subsubsection{States}
\begin{itemize}
\item spawn
\item block
\item unblock
\item finish
\end{itemize}
\subsubsection{Approaches}
\textbf{User-level threads}:
\begin{itemize}
\item thread management by application
\item less switching overhead
\item scheduling is app-specific
\item can run on any OS
\end{itemize}
\textbf{Kernel-level threads}:
\begin{itemize}
\item thread management by kernel
\item can schedule threads on multiple processors
\item OS calls blocking only the thread
\end{itemize}
\textbf{Combined approach}:
\begin{itemize}
\item threads can be grouped to kernel threads
\item kernel knows/schedules threads and processes
\end{itemize}
\subsection{Microkernel}
\textbf{Microkernel}: small operating system that only contains essential core functions
\subsubsection{Benefits}
\begin{itemize}
\item uniform interface on request by process
\item extensibility
\item flexibility
\item portability
\item reliability
\item distributed systems support
\item object-oriented OS
\end{itemize}
\subsubsection{Design}
\begin{itemize}
\item low-level memory management: map each virtual page to a physical page
\item interprocess communication: copying messages
\item I/O and interrupt management: interrupts as messages, no knowledge of IRQ handlers
\end{itemize}
\section{Concurrency}
\subsection{Terms}
\begin{description}
\item[critical section]: section of code that may have concurrency issues (shared memory \ldots)
\item[deadlock]: processes are locked because each is waiting for the other
\item[livelock]: two processes cycle uselessly because of the others' cycling
\item[mutual exclusion]: shared resources may not be modified concurrently
\item[race condition]: multiple threads/processes read and write shared memory concurrently
\item[starvation]: a runnable process is overlooked indefinitely by the scheduler
\end{description}
Mutual exclusion requires:
\begin{itemize}
\item 1 process at a time (per resource) in the critical section
\item process halting in non-critical section must not interfere with other processes
\item no deadlock or starvation
\item no delay to critical section if it is unblocked
\item no assumptions about relative process speeds or quantities
\item process remains inside critical section for a finite amount of time
\end{itemize}
and can be achieved by any of the following options:
\subsection{Hardware Support}
\subsubsection{Interrupt Disabling}
\begin{itemize}
\item guarantees mutual exclusion on uniprocessor
\item no guarantee for multiprocessor
\item a process runs until interrupt or it invokes OS service
\end{itemize}
\subsubsection{Machine Instructions}
\begin{itemize}
\item performed in single instruction cycle
\item access to memory is blocked for other instructions
\end{itemize}
\textbf{Advantages}:
\begin{itemize}
\item applicable to any number of processes (single processor or multiprocessor)
\item simple and easy to verify
\item can support multiple critical sections
\end{itemize}
\textbf{Disadvantages}:
\begin{itemize}
\item busy-waiting consumes time
\item starvation possible (one proc leaves critical section and multiple waiting)
\item deadlock possible (low priority holds critical section but OS switched to high priority)
\item pipeline stalls
\end{itemize}
\subsection{Semaphore}
\textbf{Semaphore}: variable with integer value used for signaling
\subsubsection{Operations}
\begin{itemize}
\item initialize with non-negative value
\item \textbf{wait} decrements value
\item \textbf{signal} increments value
\item only the process with the lock can release the lock
\end{itemize}
\begin{ex}
Semaphore primitive
\begin{lstlisting}[language=C]
struct semaphore {
int count;
queueType queue;
}
void semWait(semaphore s) {
s.count--;
if (s.count < 0) {
s.queue.push(thisProcess);
block(thisProcess);
}
}
void semSignal(semaphore s) {
s.count++;
if (s.count <= 0) {
nextProcess = s.queue.pop();
addToReady(nextProcess);
}
}
\end{lstlisting}
\end{ex}
\subsubsection{Producer-Consumer}
Concurrency problem where a producer adds elements to a buffer and a consumer takes them out, we can use extra semaphores to keep track of other critical values
\begin{ex}
Infinite-buffer producer/consumer
\begin{lstlisting}[language=C]
semaphore n = 0;
semaphore s = 1;
void producer() {
while (true) {
produce();
semWait(s);
append();
semSignal(s);
semSignal(n);
}
}
void consumer() {
while (true) {
semWait(n);
semWait(s);
take();
semSignal(s);
consume();
}
}
\end{lstlisting}
\end{ex}
\subsubsection{Reader-Writer}
Concurrency problem where many readers can read from a file, but only one writer may write to it and all readers must wait for the writer
\begin{ex}
Readers/Writers problem with Reader priority
\begin{lstlisting}[language=C]
int readCount;
semaphore x = 1, wsem = 1;
void reader() {
while (true) {
semWait(x);
readCount++;
if (readCount == 1)
semWait(wsem);
semSignal(x);
READUNIT();
semWait(x);
readCount--;
if (readCount == 0)
semSignal(wsem);
semSignal(x);
}
}
void writer() {
while (true) {
semWait(wsem);
WRITEUNIT();
semSignal(wsem);
}
}
\end{lstlisting}
\end{ex}
\subsection{Monitor}
\textbf{Monitor}: a software module that controls entry to data using a mutex
\begin{itemize}
\item uses condition variables for signaling
\item unused signals are lost \textit{(diff from semaphore)}
\end{itemize}
\subsection{Message Passing}
Enforce mutual exclusion by information exchange
\subsubsection{Synchronization}
\begin{description}
\item[Blocking]: sender and receiver are blocked until message delivered (rendezvous)
\item[Non-blocking Send]: only receiver blocked until message arrives
\item[Non-blocking]: no waiting
\end{description}
\subsubsection{Addressing}
\textbf{Direct Addressing}:
\begin{itemize}
\item send has a specific identifier of destination process
\item receive knows which process to expect OR
\item receive uses source parameter to return value after message received
\end{itemize}
\textbf{Indirect Addressing}:
\begin{itemize}
\item messages sent to shared data structure made of queues (\textbf{mailboxes})
\item processes send messages to a mailbox for others to pick up
\end{itemize}
\begin{ex}
Indirect messaging
\begin{lstlisting}
const int n = /* number of processes */
create_mailbox(mutex);
void P (int i) {
message msg;
while (true) {
receive(mutex, msg);
/* critical section */
send(mutex, msg);
/* remainder */
}
}
\end{lstlisting}
\end{ex}
\section{Deadlocks}
\subsection{Introduction}
\begin{description}
\item[Deadlock]: permanent blocking of a set of process over shared resources
\end{description}
There are two types of resources for the purpose of deadlocks:
\begin{itemize}
\item \textbf{reusable}: deadlock occurs if processes hold the resource the other requests
\item \textbf{consumable}: deadlock occurs if a receive() message is blocking
\end{itemize}
Conditions for a deadlock:
\begin{itemize}
\item mutual exclusion: necessary
\item no preemption: necessary
\item hold and wait: necessary
\item circular wait: sufficient
\end{itemize}
\subsection{Prevention}
\textbf{Deadlock Prevention}: design the system to prevent at least one of the conditions:
\begin{itemize}
\item mutual exclusion
\begin{itemize}
\item must be OS supported
\item often times, impossible to remove
\end{itemize}
\item no preemption
\begin{itemize}
\item if request denied, release all resources and re-request
\item use preemption to release all resources by lower priority
\end{itemize}
\item hold and wait
\begin{itemize}
\item request all necessary resources at once
\end{itemize}
\item circular wait
\begin{itemize}
\item use a linear ordering of resources, locking in order
\end{itemize}
\end{itemize}
\subsection{Avoidance}
\textbf{Deadlock Avoidance}: dynamically decide whether a resource allocation could lead to a deadlock
\begin{itemize}
\item maximum resource requirements known
\item processes are independent
\item no process may exit holding resources
\end{itemize}
Two strategies
\begin{enumerate}
\item don't start a process if the sum of all its requests could lead to deadlock
\item don't grant incremental resource request if it could lead to deadlock
\end{enumerate}
\textbf{Banker's Algorithm}: maintain a safe state where, with the current allocation scheme,
there is at least one sequence that does not lead to deadlock
\subsection{Detection}
\textbf{Deadlock Detection}: periodically check for a deadlock and choose a strategy to undo the deadlock
Strategies:
\begin{itemize}
\item abort all deadlocked processes
\item backup processes to some checkpoint
\item iteratively abort deadlocked processes until deadlock is gone
\item iteratively preempt resources until deadlock is gone
\end{itemize}
\section{Memory Management}
\subsection{Requirements}
\begin{itemize}
\item relocation: swapping, logical $\rightarrow$ physical addressing
\item protection: no access to memory of other processes (run-time checks)
\item sharing: allow process to share memory
\item logical organization: write and compile code modules separately
\item physical organization: amount of RAM should not stop program from running
\end{itemize}
\subsection{Fixed Partitioning}
\textbf{Fixed partitioning}: all sizes are fixed from beginning
\begin{itemize}
\item equal size: simple but problematic
\item unequal size: assignment is better but slower
\end{itemize}
\subsection{Dynamic Partitioning}
\textbf{Dynamic partitioning}: partitions vary in length and number, can cause \textbf{external fragmentation}
(holes between blocks of memory) \\
Algorithms to fit a block into memory:
\begin{itemize}
\item best-fit algorithm: choose block closest in size (slow)
\item first-fit: fastest
\item next-fit: first fit from previous allocation
\item buddy system: split a larger block into 2, use a BST to keep track
\end{itemize}
\subsection{Paging}
\textbf{Paging}: partition memory into small fixed-size chunks
\begin{itemize}
\item \textbf{pages}: chunks of a process
\item \textbf{frames}: chunks of memory
\end{itemize}
logical address: (page, offset) \\
physical address: (frame, offset)
\subsection{Segmentation}
\textbf{Segmentation}: variable size pages, enables no internal fragmentation \\
logical address: (segment number, offset) \\
\subsection{Paging + Segmentation}
One segment table per process and one page table per segment
\section{Virtual Memory}
\subsection{Introduction}
\begin{description}
\item[Virtual Memory]: load the program's code and data on demand, reading and storing to disk
\item[Resident Set]: portion of the process that is in main memory
\item[Page Fault]: interrupt triggered when OS accesses non-resident data
\item[Thrashing]: constant paging degrading performance
\item[Internal Fragmentation]: fragmentation inside a block
\item[External Fragmentation]: fragmentation between blocks
\end{description}
\subsection{Page Tables}
Each process has a \textbf{page table} that stores:
\begin{itemize}
\item page number
\item present bit
\item modified bit
\item other control bits
\item frame number
\end{itemize}
These tables can end up being too big so we use:
\begin{itemize}
\item \textbf{Two-Level Hierarchical Page Table}: use a table to page the paging table
\item \textbf{Inverted Page Table}: hash on page numbers to get pointer to page table entries,
total memory proportional to real address space
\end{itemize}
\textbf{Translation Lookaside Buffer}: a cache for page table that uses the principle of (temporal) locality
to store the recent pages accessed
\subsection{Segment Tables}
Each entry contains:
\begin{itemize}
\item length of the segment
\item present bit
\item modified bit
\item other control bits
\item segment base address
\end{itemize}
\subsection{Required Algorithms}
OS design must support:
\begin{itemize}
\item \textbf{fetch policy}: when a page should be brought into memory
\begin{itemize}
\item demand paging: bring pages in on demand
\item prepaging: bring in more pages than needed
\end{itemize}
\item \textbf{placement policy}: where a process piece should reside (minimize fragmentation)
\item \textbf{replacement policy}: which page should be replaced, local vs global scope
\begin{itemize}
\item optimal: replace page where time to next reference is longest
\item least recently used (LRU)
\item first-in first-out (FIFO): replace page that has been in memory longest
\item clock policy: replace first page with ``use'' bit 0 (approximates LRU)
\item page buffering: cache and revive pages to two lists: modified and unmodified
\item working set: remove pages that haven't been referenced in past $t$ time
\end{itemize}
\item \textbf{resident set management}: good size for resident set
\begin{description}
\item[fixed allocation]: decide how much memory to give ahead of time
\item[variable allocation]: number of pages allocated varies over time
\item[local replacement scope]: replace only pages from same process
\item[global replacement scope]: can replace any page
\end{description}
\begin{itemize}
\item fixed, local: difficult to predict correct size
\item variable, global: easy to implement, risk of reducing process' resident set
\item variable, local: re-evaluate allocation periodically
\end{itemize}
\item \textbf{cleaning policy}: when to write pages back to memory
\begin{itemize}
\item demand cleaning: write out a page only when replaced
\item pre-cleaning: write out in batches
\end{itemize}
\item \textbf{load control}: control number of processes resident in main memory (avoid thrashing) \\
figure out which process to remove: \textbf{process suspension}:
\begin{itemize}
\item lowest priority process
\item faulting process
\item last process activated
\item process with smallest resident set (easiest to reload)
\item largest process (most memory freed)
\end{itemize}
\end{itemize}
\section{Uniprocessor Scheduling}
\subsection{Scheduling Types}
\begin{description}
\item[Long-term]: whether to add to pool of processes
\item[Medium-term]: whether to add process to main memory
\item[Short-term]: which process to execute (aka dispatcher)
\begin{itemize}
\item invoked during an interrupt
\end{itemize}
\item[I/O Scheduling]: which process' I/O request to handle
\end{description}
\subsection{Criteria}
User-oriented, Performance
\begin{description}
\item[Turnaround Time]: time between submission and completion of process
\item[Response Time]: time between submission and first response
\item[Deadline]: most important criteria
\end{description}
User-oriented, Other
\begin{description}
\item[Predictability]: a program should run about the same regardless of load
\end{description}
System-oriented, Performance
\begin{description}
\item[Throughput]: number of processes completed per unit time
\item[Process Utilization]: percentage of time processor is busy
\end{description}
System-oriented, Other
\begin{description}
\item[Fairness]: without guidance, all processes should be treated the same
\item[Enforcing Priorities]: favour higher priorities
\item[Balancing Resources]: keep resources busy, favour processes that don't utilize stressed resources
\end{description}
\subsection{Scheduling Algorithms}
Decision mode can be:
\begin{itemize}
\item preemptive
\item non-preemptive
\item cooperative: developer programs the context switch
\end{itemize}
\subsubsection{First-Come First-Served}
Each process joins the ready queue and runs in FIFO
\begin{itemize}
\item good for computationally-intensive
\item favours CPU-bound processes
\end{itemize}
\subsubsection{Round Robin}
Uses preemption based on periodic clock interrupts every quantum $q$
\begin{itemize}
\item favours CPU-bound processes
\end{itemize}
Improve it with \textbf{Virtual Round Robin} which maintains specific I/O queues
to even out the playing field
\subsubsection{Shortest Process Next}
Non-preemptive strategy with shortest expected time
\begin{itemize}
\item possible starvation of long processes
\item must know service time
\end{itemize}
\subsubsection{Shortest Remaining Time}
Preemptive version of SPN
\begin{itemize}
\item no additional interrupt (as in RR)
\item better turnaround time than SPN
\item elapsed service time must be recorded
\end{itemize}
\subsubsection{Highest Response Ratio Next}
Choose process with greatest Ratio = $\frac{\text{time waiting} + \text{service time}}{\text{service time}}$
\begin{itemize}
\item need to know service time
\item no starvation
\end{itemize}
\subsubsection{Feedback}
Use queues of different priorities, penalizing processes that take long
by moving a process down a priority after it finishes running for $q$ time
\begin{itemize}
\item don't need to know service time
\item may favour I/O-bound processes
\item starvation of long running processes possible
\end{itemize}
\subsubsection{Fair Share}
Make decisions on the basis of process sets (\textit{e.g.} threads that make up applications).
Assign weights to each set and allocate resources according to weights
\section{Multiprocessor Scheduling}
\subsection{Introduction}
\subsubsection{Classifications}
\begin{itemize}
\item loosely coupled/distributed: work together but each has its own memory and I/O
\item functionally-specialized: controlled by a master processor
\item tightly coupled: share main memory, controlled by OS
\end{itemize}
\subsubsection{Granularity}
\begin{itemize}
\item independent: multiple unrelated processes
\item coarse/very coarse: distributed processing across network nodes
\item medium: parallel processing within an application (\textit{e.g.}\ threads)
\item fine: parallelism inherent in single instruction stream
\end{itemize}
\subsection{Scheduling Design}
Assignment of processes to processors:
\begin{enumerate}
\item multiprocessor is uniform
\begin{enumerate}
\item static assignment: assign processes initially and no change
\item process migration: migrate processes dynamically
\item dynamic load balancing: start with static assignment but migrate as needed
\end{enumerate}
\item multiprocessor is heterogeneous: need special software
\end{enumerate}
\end{document}