Sei sulla pagina 1di 42

2010

OPERATING SYSTEM DESIGN AND PRINCIPLES
DESIGN PROBLEM 2

SUBMITTED TO:

RAMANDEEP SIR

SUBMITTED BY :
ANJANI KUNWAR
RA1803A10
10807973
B.TECH(CSE)-H
Design Problems 1

Implement all CPU scheduling algorithms and memory


management algorithm in a single menu based program. Your
program may be either in GUI based or command line based in
any programming language you know. Various OS is using various
scheme for memory management. Explain and compare different
memory management techniques used in different operating
systems

1. Windows.

2. Macintosh

3 Linux

Expectation:

1. Give details of various memory management schemes on


each OS.
2. What are various CPU scheduling algorithm currently used
in above stated system
3. Discuss pros and cons of all CPU scheduling and memory
management algorithm.
CPU scheduling algorithms and memory management algorithm in a
single menu based program

SOURCE CODE

#include <iostream>   // standard replacement for the pre-standard <iostream.h>
#include <cstdio>     // standard replacement for <stdio.h>
#include <conio.h>    // non-standard (Turbo C++); only needed for getch()/clrscr()
using namespace std;  // the file uses unqualified cout/cin throughout

// Menu-driven CPU scheduler: stores the burst times of up to 19 processes
// (all arrays are used 1-indexed) and computes per-process and average
// waiting times under several scheduling policies.
class cpuschedule
{
    int n;        // number of processes (1..19)
    int Bu[20];   // burst times, Bu[1..n]
    float Twt;    // total waiting time of the most recent run
    float Awt;    // average waiting time of the most recent run
    float A[20];  // arrival times (enlarged from 10 to match Bu[20])
    float Wt[20]; // per-process waiting times (enlarged from 10 to match Bu[20])
    float w;      // running clock used by some policies
public:
    // Read the number of processes and their burst times from stdin.
    void Getdata();
    // First Come First Served.
    void Fcfs();
    // Shortest Job First (all processes assumed to arrive together).
    void Sjf();
    // Shortest Job First with preemption (shortest remaining time first).
    void SjfP();
    // Shortest Job First, non-preemptive, with arrival times.
    void SjfNp();
    // Round Robin with a user-supplied time quantum.
    void RoundRobin();
    // Non-preemptive priority scheduling (priority 1 runs first).
    void Priority();
};

// Implementation file for Cpu scheduling

#include "cpuh.h"

//Getting no of processes and Burst time

void cpuschedule::Getdata()

int i;

cout<<"

Enter the no of processes:";

cin>>n;

for(i=1;i<=n;i++)

cout<<"

Enter The BurstTime for Process p"<<i<<"= ";

cin>>Bu[i];

}
//First come First served Algorithm

void cpuschedule::Fcfs()

int i,B[10];

Twt=0.0;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process p"<<i<<"= ";

cout<<B[i];

Wt[1]=0;

for(i=2;i<=n;i++)

Wt[i]=B[i-1]+Wt[i-1];

//Calculating Average Weighting Time

for(i=1;i<=n;i++)

Twt=Twt+Wt[i];

Awt=Twt/n;

cout<<"
Total Weighting Time="<<Twt;

cout<<"

Average Weighting Time="<<Awt<<"

";

//Shortest job First Algorithm

void cpuschedule::Sjf()

int i,j,temp,B[10];

Twt=0.0;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process p"<<i<<"= ";

cout<<B[i];

for(i=n;i>=1;i--)

for(j=1;j<=n;j++)

if(B[j-1]>B[j])
{

temp=B[j-1];

B[j-1]=B[j];

B[j]=temp;

Wt[1]=0;

for(i=2;i<=n;i++)

Wt[i]=B[i-1]+Wt[i-1];

//calculating Average Weighting Time

for(i=1;i<=n;i++)

Twt=Twt+Wt[i];

Awt=Twt/n;

cout<<"

Total Weighting Time="<<Twt;

cout<<"

Average Weighting Time="<<Awt<<"

";

}
//Shortest job First Algorithm with NonPreemption

void cpuschedule::SjfNp()

int i,B[10],Tt=0,temp,j;

char S[10];

float A[10],temp1,t;

Twt=0.0;

w=0.0;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process p"<<i<<"= ";

cout<<B[i];

S[i]='T';

Tt=Tt+B[i];

cout<<"

Enter the Arrival Time for"<<i<<"th process= ";

cin>>A[i];

}
for(i=n;i>=1;i--)

for(j=3;j<=n;j++)

if(B[j-1]>B[j])

temp=B[j-1];

temp1=A[j-1];

B[j-1]=B[j];

A[j-1]=A[j];

B[j]=temp;

A[j]=temp1;

for(i=1;i<=n;i++)

cout<<"

p"<<i<<" "<<B[i]<<" "<<A[i];

//For the 1st process


Wt[1]=0;

w=w+B[1];

t=w;

S[1]='F';

while(w<Tt)

i=2;

while(i<=n)

if(S[i]=='T'&&A[i]<=t)

Wt[i]=w;

cout<<"

WT"<<i<<"="<<Wt[i];

S[i]='F';

w=w+B[i];

t=w;

i=2;

else

i++;

}
}

for(i=1;i<=n;i++)

cout<<"

Wt"<<i<<"=="<<Wt[i];

//calculating average weighting Time

for(i=1;i<=n;i++)

Twt=Twt+(Wt[i]-A[i]);

Awt=Twt/n;

cout<<"Total Weighting Time="<<Twt<<"

";

cout<<"Average Weighting Time="<<Awt<<"

";

//Priority Algorithm

void cpuschedule::Priority()

int i,B[10],P[10],j;

w=0.0;
int max;

Twt=0.0;

max=1;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process p"<<i<<"= ";

cout<<B[i];

cout<<"

Enter the priority for process P"<<i<<"= ";

cin>>P[i];

if(max<P[i])

max=P[i];

j=1;

while(j<=max)

i=1;

while(i<=n)

if(P[i]==j)

{
Wt[i]=w;

w=w+B[i];

i++;

j++;

//calculating average weighting Time

for(i=1;i<=n;i++)

Twt=Twt+Wt[i];

Awt=Twt/n;

cout<<"Total Weighting Time="<<Twt<<"

";

cout<<"Average Weighting Time="<<Awt<<"

";

//Shortest job First Algorithm with Preemption

void cpuschedule::SjfP()

int i,j,m,Wt[10],k,B[10],A[10],Tt=0,Wtm[10],temp;

char S[20],start[20];
int max=0,Time=0,min;

float Twt=0.0,Awt;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process P"<<i<<"= "<<B[i];

if(B[i]>max)

max=B[i];

Wt[i]=0;

S[i]='T';

start[i]='F';

Tt=Tt+B[i];

cout<<"

Enter the Arrival Time for"<<i<<"th process= ";

cin>>A[i];

if(A[i]>Time)

Time=A[i];

//cout<<"

Max="<<max;

int w=0,flag=0,t=0;

i=1;
while(t<Time)

if(A[i]<=t && B[i]!=0)

if(flag==0)

Wt[i]=Wt[i]+w;

cout<<"

Wt["<<i<<"]="<<Wt[i];

B[i]=B[i]-1;

if(B[i]==0)

S[i]='F';

start[i]='T';

t++;

w=w+1;

if(S[i]!='F')

j=1;flag=1;

while(j<=n && flag!=0)

if(S[j]!='F' && B[i]>B[j] && A[j]<=t && i!=j )

{
flag=0;

Wt[i]=Wt[i]-w;

i=j;

else

flag=1;

j++;

else

i++;

j=1;

while(A[j]<=t &&j<=n)

if(B[i]>B[j] && S[j]!='F')

flag=0;

i=j;

j++;
}

else

if(flag==0)

i++;

cout<<"

Printing remaining burst time

";

for(i=1;i<=n;i++)

cout<<"

B["<<i<<"]="<<B[i];

cout<<"

";

while(w<Tt)

min=max+1;

i=1;

while(i<=n)
{

if(min>B[i] && S[i]=='T')

min=B[i];

j=i;

i++;

i=j;

if(w==Time && start[i]=='T')

w=w+B[i];

S[i]='F';

else

Wt[i]=Wt[i]+w;

w=w+B[i];

S[i]='F';

cout<<"Weight info
";

for(i=1;i<=n;i++)

cout<<"

WT["<<i<<"]="<<Wt[i];

cout<<"after subtracting arrival time

";

for(i=1;i<=n;i++)

Wt[i]=Wt[i]-A[i];

cout<<"

WT["<<i<<"]="<<Wt[i];

//Calculating Average Weighting time

for(i=1;i<=n;i++)

Twt=Twt+Wt[i];

Awt=Twt/n;

cout<<"

Average Weighting Time="<<Awt;

}
//Round Robin Algorithm

void cpuschedule::RoundRobin()

int i,j,tq,k,B[10],Rrobin[10][10],count[10];

int max=0;

int m;

Twt=0.0;

for(i=1;i<=n;i++)

B[i]=Bu[i];

cout<<"

Burst time for process p"<<i<<"= ";

cout<<B[i];

if(max<B[i])

max=B[i];

Wt[i]=0;

cout<<"

Enter the Time Quantum=";

cin>>tq;

//TO find the dimension of the Rrobin array


m=max/tq+1;

//initializing Rrobin array

for(i=1;i<=n;i++)

for(j=1;j<=m;j++)

Rrobin[i][j]=0;

//placing value in the Rrobin array

i=1;

while(i<=n)

j=1;

while(B[i]>0)

if(B[i]>=tq)

B[i]=B[i]-tq;

Rrobin[i][j]=tq;

j++;

}
else

Rrobin[i][j]=B[i];

B[i]=0;

j++;

count[i]=j-1;

i++;

cout<<"Display

";

for(i=1;i<=n;i++)

for(j=1;j<=m;j++)

cout<<"Rr["<<i<<","<<j<<"]="<<Rrobin[i][j];

cout<<" ";

cout<<"

";

}
//calculating weighting time

int x=1;

i=1;

while(x<=n)

for(int a=1;a<x;a++)

Wt[x]=Wt[x]+Rrobin[a][i];

i=1;

int z=x;

j=count[z];

k=1;

while(k<=j-1)

if(i==n+1)

i=1;

k++;

else

if(i!=z)
{

Wt[z]=Wt[z]+Rrobin[i][k];

i++;

x++;

for(i=1;i<=n;i++)

cout<<"

Weighting Time for process P"<<i<<"="<<Wt[i];

//calculating Average Weighting Time

for(i=1;i<=n;i++)

Twt=Twt+Wt[i];

Awt=Twt/n;

cout<<"

Total Weighting Time="<<Twt;

cout<<"

Average Weighting Time="<<Awt<<"

";

}
//Application file for cpu Scheduling

#include "cpuh.h"

void main()

int ch,cho;

cpuschedule c;

do

cout<<" MENU

";

cout<<"1.Getting BurstTime

";

cout<<"2.FirstComeFirstServed

";

cout<<"3.ShortestJobFirst

";

cout<<"4.RoundRobin

";

cout<<"5.Priority

";

cout<<"6.EXIT

";
cout<<"Enter your choice

";

cin>>ch;

switch(ch)

case 1:

c.Getdata();

break;

case 2:

cout<<"FIRST COME FIRST SERVED SCHEDULING

";

c.Fcfs();

break;

case 3:

cout<<"SHORTEST JOB FIRST SCHEDULING

";

do

cout<<"1.SJF-Normel

";

cout<<"2.SJF-Preemptive

";

cout<<"3.SJF-NonPreemptive
";

cout<<"Enter your choice

";

cin>>cho;

switch(cho)

case 1:

c.Sjf();

break;

case 2:

c.SjfP();

break;

case 3:

c.SjfNp();

break;

}while(cho<=3);

break;

case 4:

cout<<"ROUND ROBIN SCHEDULING

";

c.RoundRobin();

break;
case 5:

cout<<"PRIORITY SCHEDULING

";

c.Priority();

break;

case 6:

break;

}while(ch<=5);

}
CPU Scheduling and process scheduling

A multiprogramming operating system allows more than one process to be loaded


into the executable memory at a time and for the loaded processes to share the CPU
using time-multiplexing.Part of the reason for using multiprogramming is that the
operating system itself is implemented as one or more processes, so there must be a
way for the operating system and application processes to share the CPU. Another
main reason is the need for processes to perform I/O operations in the normal
course of computation. Since I/O operations ordinarily require orders of magnitude
more time to complete than do CPU instructions, multiprogramming systems allocate
the CPU to another process whenever a process invokes an I/O operation.

Goals and purpose of CPU scheduling

Goals and purpose for Scheduling:-

Make sure your scheduling strategy is good enough with the following criteria:

o Utilization/Efficiency: keep the CPU busy 100% of the time with


useful work
o Throughput: maximize the number of jobs processed per hour.
o Turnaround time: from the time of submission to the time of
completion, minimize the time batch users must wait for output
o Waiting time: Sum of times spent in ready queue - Minimize this
o Response Time: time from submission till the first response is
produced, minimize response time for interactive users
o Fairness: make sure each process gets a fair share of the CPU

• CPU utilization We want to keep the CPU as busy as possible. CPU


utilization may range from 0 to 100 percent. In a real system, it should range
from 40 percent(for a lightly loaded system) to 90 percent (for a heavily
used system).
• Throughput If the CPU is busy, then work is being done. One measure of
work is the number of processes that are completed per time unit, called
throughput.
• Turnaround time From the point of view of a particular process, the
important criterion is how long it takes to execute that process. The interval
from the time of submission to the time of completion is called turnaround
time. Turnaround time is the sum of all the periods spent waiting to get into
memory, waiting in the ready queue, executing on the CPU, and doing I/O.
• Waiting time The CPU scheduling algorithm does not really affect the
amount of time during which a process executes or does I/O. The algorithm
affects only the amount of time that a process spends waiting in the ready
queue. Thus, rather than looking at turnaround time, we might simply
consider the waiting time for each process.
• Response time In an interactive system, turnaround time may not be the best
criterion. Often, a process can produce some output fairly early and can
continue computing new results while previous results are being output to
the user. Thus, another measure is the time from the submission of a request
until the first response is produced. This measure, called response time, is
the amount of time it takes to start responding, but not the time it takes
output that response. The turnaround time is generally limited by the speed
of the output device.

Preemptive Vs Nonpreemptive Scheduling

The Scheduling algorithms can be divided into two categories with respect to how
they deal with clock interrupts.

• Non-Preemptive: Non-preemptive algorithms are designed so that once a


process enters the running state(is allowed a process), it is not removed from
the processor until it has completed its service time ( or it explicitly yields
the processor).
o Also called co-operative scheduling. Co-operative schedulers are
generally very simple, as the processes are arranged in a ROUND
ROBIN queue. When a running process gives itself up, it goes to the
end of the queue. The process at the top of the queue is then run, and
all processes in the queue move up one place. This provides a measure
of fairness, but does not prevent one process from monopolizing the
system (failing to give itself up).
• Preemptive: Preemptive algorithms are driven by the notion of prioritized
computation. The process with the highest priority should always be the one
currently using the processor. If a process is currently using the processor
and a new process with a higher priority enters the ready list, the process on
the processor should be removed and returned to the ready list until it is once
again the highest-priority process in the system.
o Pre-emptive scheduling uses a real-time clock that generates interrupts
at regular intervals (say every 1/100th of a second). Each time an
interrupt occurs, the processor is switched to another task. Systems
employing this type of scheduling generally assign priorities to each
process, so that some may be executed more frequently than others.

Many scheduling algorithms are available, and each of them is either preemptive or non-preemptive.

Schedule Algorithms

Based on nature of scheduling algorithm i.e. preemptive and non preemptive we


have following algorithms for scheduling(Switching CPU between processes).

• FCFS Scheduling
• Round Robin Scheduling
• SJF Scheduling
• SRT Scheduling
• Priority Scheduling
• Multilevel Queue Scheduling
• Multilevel Feedback Queue Scheduling

Each of scheduling algorithm has its own advantages and disadvantages.

First In First Out (FIFO) or First Come First serve

This is a Non-Premptive scheduling algorithm. A FIFO queue is a list of available


processes awaiting execution by the processor. New processes arrive and are
placed at the end of the queue. The process at the start of the queue is assigned the
processor when it next becomes available, and all other processes move up one slot
in the queue.FIFO algorithm assigns priority to processes in the order in which
they request the processor.The process that requests the CPU first is allocated the
CPU first.When a process comes in, add its PCB to the tail of ready queue. When
running process terminates, dequeue the process (PCB) at head of ready queue and
run it.
Consider the example with P1=20, P2=3, P3=3

Gantt Chart for FCFS : 0 - 20 P1 , 20 - 23 P2 , 23 - 26 P3

Turnaround time for P1 = 20

Turnaround time for P2 = 20 + 3
Turnaround time for P3 = 20 + 3 + 3

Average Turnaround time = (20*3 + 3*2 + 3*1) / 3

In general we have (n*a + (n-1)*b + ....) / n

If we want to minimize this, a should be the smallest, followed by b and


so on.

Note: While the FIFO algorithm is easy to implement, it ignores the service
time request and all other criteria that may influence the performance with
respect to turnaround or waiting time.

Problem or Disadvantages: One process can monopolize the CPU, so if the first

process has a long CPU burst, every other process must wait for the CPU even if it
is very short. This means short processes may have to wait a long time.

Solution: Limit the amount of time a process can execute without a context switch.
This time is called a time slice.

The FCFS scheduling algorithm is nonpreemptive. Once the CPU has been
allocated to a process, that process keeps the CPU until it wants to release the
CPU, either by terminating or by requesting I/O. The FCFS algorithm is
particularly troublesome for time-sharing systems. where it is important that each
user get a share of the CPU at regular intervals. It would be disastrous to allow one
process to keep the CPU for an extended period.

Round Robin Scheduling

RR (Round Robin) is One of the oldest, simplest, fairest and most widely used
algorithm.The round-robin(RR) scheduling algorithm is designed especially for
time-sharing systems. A small unit of time, called a time quantum or time-slice is
defined. A time quantum is generally from 10 to 100 milliseconds. The ready
queue is treated as a circular queue. The CPU scheduler goes around the ready
queue, allocating the CPU to each process for a time interval of up to 1 time
quantum.

• Round Robin Scheduling is preemptive (at the end of time-slice) therefore it


is effective in time-sharing environments in which the system needs to
guarantee reasonable response times for interactive users.

To implement RR scheduling, we keep the ready queue as a first-in, first-out


(FIFO) queue of processes. New processes are added to the tail of the ready queue.
The CPU scheduler picks the first process, sets a timer to interrupt after 1 time
quantum, and dispatches the process.

One of two things will then happen. The process may have a CPU burst of less
than 1 time quantum. In this case, the process itself will release the CPU
voluntarily. The scheduler will then proceed to the next process in the ready queue.
Otherwise, if the CPU burst of the currently running process is greater than 1 time
quantum, the timer will go off and will cause an interrupt to the operating system.
A context switch will be executed, and the process will be put at the tail of the
ready queue. The CPU scheduler will then select the next process from the ready
queue.

In the RR scheduling algorithm, no process is allocated the CPU for more than 1
time quantum in a row. If a process's CPU burst exceeds 1 time quantum, that
process is preempted and is put back in the ready queue. The RR scheduling
algorithm is inherently preemptive.

Performance:-The performance of the RR algorithm depends heavily on the size of


the time quantum. At one extreme, if the time quantum is very large(infinite), the
RR policy is the same as the FCFS policy. If the time quantum is very small(say 10
milliseconds), the RR approach is called processor sharing, and appears(in theory)
to the users as though each of n processes has its own processor running at 1/n the
speed of the real processor.

For operating systems, we need to consider the effect of context switching on the
performance of RR scheduling. Let us assume that we have only 1 process of 10
time units. If the quantum is 12 time units, the process finishes in less than 1 time
quantum, with no overhead. If the quantum is 6 time units, however, the process
will require 2 quanta, resulting in a context switch. If the time quantum is 1 time
unit, then 9 context switches will occur, slowing the execution of the process
accordingly.
Thus, we want the time quantum to be large with respect to the context switch
time. If the context switch time is approximately 5 percent of the time quantum,
then about 5 percent of the CPU time will be spent in context switch.

Shortest-Job-First Scheduling(SJF)

Maintain the Ready queue in order of increasing job lengths. When a job comes in,
insert it in the ready queue based on its length. When current process is done, pick
the one at the head of the queue and run it. This algorithm associates with each
process the length of the next CPU burst. When the CPU is available, it is assigned
to the process that has the next smallest CPU burst. If two processes have the same
length CPU burst, FCFS scheduling is used to break the tie.

This is provably the most optimal in terms of turnaround/response time.

SJF algorithm may be either preemptive or non- preemptive. A preemptive SJF


algorithm will preempt the currently executing process, whereas a non-preemptive
SJF algorithm will allow the currently running process to finish its CPU burst.
Preemptive SJF scheduling is sometimes called shortest-remaining-time-first
scheduling.

Make an estimate based on the past behavior.

Say the estimated time (burst) for a process is E0, suppose the actual
time is measured to be T0.

Update the estimate by taking a weighted sum of these two


ie. E1 = aT0 + (1-a)E0

in general, E(n+1) = aTn + (1-a)En (Exponential average)

if a=0, recent history no weightage


if a=1, past history no weightage.

typically a=1/2.

E(n+1) = aTn + (1-a)aT(n-1) + ... + (1-a)^j aT(n-j) + ...

Older information has less weightage


;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;

The memory management subsystem provides:

Large Address Spaces

The operating system makes the system appear as if it has a larger amount of
memory than it actually has. The virtual memory can be many times larger
than the physical memory in the system,

Protection

Each process in the system has its own virtual address space. These virtual
address spaces are completely separate from each other and so a process
running one application cannot affect another. Also, the hardware virtual
memory mechanisms allow areas of memory to be protected against writing.
This protects code and data from being overwritten by rogue applications.

Memory Mapping

Memory mapping is used to map image and data files into a processes
address space. In memory mapping, the contents of a file are linked directly
into the virtual address space of a process.

Fair Physical Memory Allocation

The memory management subsystem allows each running process in the


system a fair share of the physical memory of the system,

Shared Virtual Memory

Shared memory can also be used as an Inter Process Communication (IPC)


mechanism, with two or more processes exchanging information via memory
common to all of them.

**Linux supports the Unix TM System V shared memory IPC.

Demand paging:
Linux uses demand paging to load executable images into a processes virtual
memory. Whenever a command is executed, the file containing it is opened
and its contents are mapped into the processes virtual memory.

Swapping

Linux uses a Least Recently Used (LRU) page aging technique to fairly
choose pages which might be removed from the system. This scheme
involves every page in the system having an age which changes as the page
is accessed. The more that a page is accessed, the younger it is; the less that
it is accessed the older and more stale it becomes. Old pages are good
candidates for swapping.

Physical and Virtual Addressing Modes

Physical addressing mode requires no page tables and the processor does not
attempt to perform any address translations in this mode. The Linux kernel is
linked to run in physical address space.

The Alpha AXP processor does not have a special physical addressing mode.
Instead, it divides up the memory space into several areas and designates two of
them as physically mapped addresses. This kernel address space is known as
KSEG address space and it encompasses all addresses .

Caches

Linux uses a number of memory management related caches:

Buffer Cache

The buffer cache contains data buffers that are used by the block device
drivers.

These buffers are of fixed sizes (for example 512 bytes) and contain blocks
of information that have either been read from a block device or are being
written to it. All hard disks are block devices.
Page Cache

This is used to speed up access to images and data on disk.

It is used to cache the logical contents of a file a page at a time and is


accessed via the file and offset within the file. As pages are read into
memory from disk, they are cached in the page cache.

Swap Cache

Only modified pages are saved in the swap file.

Hardware Caches

One commonly implemented hardware cache is in the processor; a cache of


Page Table Entries. In this case, the processor does not always read the page
table directly but instead caches translations for pages as it needs them.

;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;;;;;;;;;;;;;;;

SJF

Advantages

1) Provably optimal for minimizing average wait time (with no preemption)


– Moving shorter job before longer job improves waiting time of short job
more than it harms waiting time of long job

2) Helps keep I/O devices busy

Disadvantages

1) Not practical: Cannot predict future CPU burst time


2) OS solution: Use past behavior to predict future behavior

Starvation: Long jobs may never be scheduled


RR

Advantages

1) Jobs get fair share of CPU


2) Shortest jobs finish relatively quickly

Disadvantages

1) Poor average waiting time with similar job lengths


– Example: 10 jobs each requiring 10 time slices

– RR: All complete after about 100 time slices

– FCFS performs better!

2) Performance depends on length of time-slice


– If time-slice too short, pay overhead of context switch

– If time-slice too long, degenerate to FCFS

Priority Based

!Idea: Each job is assigned a priority

1) Schedule highest priority ready job


2) May be preemptive or non-preemptive
3) Priority may be static or dynamic

Advantages
1) Static priorities work well for real time systems
2) Dynamic priorities work well for general workloads

Disadvantages

1) Low priority jobs can starve


2) How to choose priority of each job?

Goal: Adjust priority of job to match CPU burst

1) Approximate SCTF (shortest completion time first) by giving short jobs high priority

Memory Management Algorithm

Pros & Cons of Paging

• Allows jobs to be allocated in non-contiguous memory

locations.

– Memory used more efficiently; more jobs can fit.

• Size of page is crucial (not too small, not too large).

• Increased overhead occurs.

• Reduces, but does not eliminate, internal fragmentation.

Pros & Cons of Demand Paging

• First scheme in which a job was no longer constrained by


the size of physical memory (virtual memory).

• Uses memory more efficiently than previous schemes

because sections of a job used seldom or not at all aren’t

loaded into memory unless specifically requested.

• Increased overhead caused by tables and page interrupts.

Pros & Cons of Segmentation

• Compaction.

• External fragmentation.

• Secondary storage handling.

• Memory is allocated dynamically

Pros & Cons of Segment/Demand

Paging

• Overhead required for the extra tables

• Time required to reference segment table and page table.

• Logical benefits of segmentation.

• Physical benefits of paging

• To minimize number of references, many systems use

associative memory to speed up the process


Memory management schemes:- • The concept of a logical address space that is
bound to a separate

physical address space is central to proper memory management.

• Logical address – generated by the CPU; also referred to as virtual

address

• Physical address – address seen by the memory unit

• Logical and physical addresses are the same in compile-time and loadtime

address-binding schemes; logical (virtual) and physical addresses

differ in execution-time address-binding scheme

Memory Allocation Techniques


The best technique for allocating memory depends on how the memory will be
used. Table 3 summarizes the Windows kernel-mode memory allocation routines
that are discussed in this section.
Name Description
Allocates paged or nonpaged, cached, and
cache-aligned memory from the kernel-mode
ExAllocatePoolXxx pool.
ExAllocatePoolWithTag is the primary
function for memory allocation.
Allocates a buffer for common-buffer DMA
AllocateCommonBuffer
and ensures cross-platform compatibility.
Allocates nonpaged, physically contiguous,
cache-aligned memory.
Drivers should not use these functions to
MmAllocateContiguousMemor
allocate memory for DMA because the
y
addresses that they return are not guaranteed to
[SpecifyCache]
be compatible across all hardware
configurations. Drivers should use
AllocateCommonBuffer instead.
Reserves virtual addresses in a specific range
for later use in an MDL, but does not map
them to physical memory.
The driver can later use IoAllocateMdl,
MmAllocateMappingAddress
MmProbeAndLockPages, and
MmMapLockedPagesWithReservedMappin
g to map the virtual addresses to physical
memory.
Allocates nonpaged, noncached, page-aligned
MmAllocateNoncachedMemor
memory. Drivers rarely use this function; they
y
must not use it to allocate buffers for DMA.
Allocates nonpaged, noncontiguous pages from
physical memory, specifically for an MDL. An
MDL can describe up to 4 GB. This function
might return fewer pages than the caller
requested. Therefore, the driver must call
MmGetMdlByteCount to verify the number
of allocated bytes.
A driver can call this function to allocate
MmAllocatePagesForMdl
physical memory from a particular address
range, such as allocating memory for a device
that cannot address memory above 4 GB.
However, drivers should not use this function
to allocate buffers for DMA. Only the
Windows DMA routines guarantee the cross-
platform compatibility that drivers require.

Potrebbero piacerti anche