Checkpoint synchronization: Difference between revisions
Content added Content deleted
Thundergnat (talk | contribs) m (syntax highlighting fixup automation) |
Thundergnat (talk | contribs) m (Automated syntax highlighting fixup (second round - minor fixes)) |
||
Line 1: | Line 1: | ||
[[Category:Classic CS problems and programs]] |
|||
{{task|Concurrency}}{{requires|Concurrency}} |
|||
The checkpoint synchronization is a problem of synchronizing multiple [[task]]s. Consider a workshop where several workers ([[task]]s) assemble details of some mechanism. When each of them completes their work, they put the details together. There is no store, so a worker who finished their part first must wait for others before starting another one. Putting details together is the ''checkpoint'' at which [[task]]s synchronize themselves before going their separate ways. |
The checkpoint synchronization is a problem of synchronizing multiple [[task]]s. Consider a workshop where several workers ([[task]]s) assemble details of some mechanism. When each of them completes their work, they put the details together. There is no store, so a worker who finished their part first must wait for others before starting another one. Putting details together is the ''checkpoint'' at which [[task]]s synchronize themselves before going their separate ways. |
||
Line 11: | Line 12: | ||
If you can, implement workers joining and leaving. |
If you can, implement workers joining and leaving. |
||
=={{header|Ada}}== |
=={{header|Ada}}== |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="ada">with Ada.Calendar; use Ada.Calendar; |
||
with Ada.Numerics.Float_Random; |
with Ada.Numerics.Float_Random; |
||
with Ada.Text_IO; use Ada.Text_IO; |
with Ada.Text_IO; use Ada.Text_IO; |
||
Line 187: | Line 187: | ||
D ends shift |
D ends shift |
||
</pre> |
</pre> |
||
=={{header|BBC BASIC}}== |
=={{header|BBC BASIC}}== |
||
{{works with|BBC BASIC for Windows}} |
{{works with|BBC BASIC for Windows}} |
||
<syntaxhighlight lang=bbcbasic> INSTALL @lib$+"TIMERLIB" |
<syntaxhighlight lang="bbcbasic"> INSTALL @lib$+"TIMERLIB" |
||
nWorkers% = 3 |
nWorkers% = 3 |
||
DIM tID%(nWorkers%) |
DIM tID%(nWorkers%) |
||
Line 269: | Line 268: | ||
Worker 2 starting (5 ticks) |
Worker 2 starting (5 ticks) |
||
</pre> |
</pre> |
||
=={{header|C}}== |
=={{header|C}}== |
||
Using OpenMP. Compiled with <code>gcc -Wall -fopenmp</code>. |
Using OpenMP. Compiled with <code>gcc -Wall -fopenmp</code>. |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="c">#include <stdio.h> |
||
#include <stdlib.h> |
#include <stdlib.h> |
||
#include <unistd.h> |
#include <unistd.h> |
||
Line 303: | Line 301: | ||
return 0; |
return 0; |
||
}</syntaxhighlight> |
}</syntaxhighlight> |
||
=={{header|C++}}== |
|||
{{works with|C++11}} |
|||
<syntaxhighlight lang=cpp>#include <iostream> |
|||
#include <chrono> |
|||
#include <atomic> |
|||
#include <mutex> |
|||
#include <random> |
|||
#include <thread> |
|||
std::mutex cout_lock; |
|||
class Latch |
|||
{ |
|||
std::atomic<int> semafor; |
|||
public: |
|||
Latch(int limit) : semafor(limit) {} |
|||
void wait() |
|||
{ |
|||
semafor.fetch_sub(1); |
|||
while(semafor.load() > 0) |
|||
std::this_thread::yield(); |
|||
} |
|||
}; |
|||
struct Worker |
|||
{ |
|||
static void do_work(int how_long, Latch& barrier, std::string name) |
|||
{ |
|||
std::this_thread::sleep_for(std::chrono::milliseconds(how_long)); |
|||
{ std::lock_guard<std::mutex> lock(cout_lock); |
|||
std::cout << "Worker " << name << " finished work\n"; } |
|||
barrier.wait(); |
|||
{ std::lock_guard<std::mutex> lock(cout_lock); |
|||
std::cout << "Worker " << name << " finished assembly\n"; } |
|||
} |
|||
}; |
|||
int main() |
|||
{ |
|||
Latch latch(5); |
|||
std::mt19937 rng(std::random_device{}()); |
|||
std::uniform_int_distribution<> dist(300, 3000); |
|||
std::thread threads[] { |
|||
std::thread(&Worker::do_work, dist(rng), std::ref(latch), "John"), |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Henry"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Smith"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Jane"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Mary"}, |
|||
}; |
|||
for(auto& t: threads) t.join(); |
|||
std::cout << "Assembly is finished"; |
|||
}</syntaxhighlight> |
|||
{{out}} |
|||
<pre> |
|||
Worker Mary finished work |
|||
Worker Smith finished work |
|||
Worker John finished work |
|||
Worker Henry finished work |
|||
Worker Jane finished work |
|||
Worker Jane finished assembly |
|||
Worker Smith finished assembly |
|||
Worker Mary finished assembly |
|||
Worker Henry finished assembly |
|||
Worker John finished assembly |
|||
Assembly is finished |
|||
</pre> |
|||
=={{header|C sharp|C#}}== |
=={{header|C sharp|C#}}== |
||
{{works with|C sharp|10}} |
{{works with|C sharp|10}} |
||
<syntaxhighlight lang=csharp>using System; |
<syntaxhighlight lang="csharp">using System; |
||
using System.Linq; |
using System.Linq; |
||
using System.Threading; |
using System.Threading; |
||
Line 465: | Line 394: | ||
//etc |
//etc |
||
</pre> |
</pre> |
||
=={{header|C++}}== |
|||
{{works with|C++11}} |
|||
<syntaxhighlight lang="cpp">#include <iostream> |
|||
#include <chrono> |
|||
#include <atomic> |
|||
#include <mutex> |
|||
#include <random> |
|||
#include <thread> |
|||
std::mutex cout_lock; |
|||
class Latch |
|||
{ |
|||
std::atomic<int> semafor; |
|||
public: |
|||
Latch(int limit) : semafor(limit) {} |
|||
void wait() |
|||
{ |
|||
semafor.fetch_sub(1); |
|||
while(semafor.load() > 0) |
|||
std::this_thread::yield(); |
|||
} |
|||
}; |
|||
struct Worker |
|||
{ |
|||
static void do_work(int how_long, Latch& barrier, std::string name) |
|||
{ |
|||
std::this_thread::sleep_for(std::chrono::milliseconds(how_long)); |
|||
{ std::lock_guard<std::mutex> lock(cout_lock); |
|||
std::cout << "Worker " << name << " finished work\n"; } |
|||
barrier.wait(); |
|||
{ std::lock_guard<std::mutex> lock(cout_lock); |
|||
std::cout << "Worker " << name << " finished assembly\n"; } |
|||
} |
|||
}; |
|||
int main() |
|||
{ |
|||
Latch latch(5); |
|||
std::mt19937 rng(std::random_device{}()); |
|||
std::uniform_int_distribution<> dist(300, 3000); |
|||
std::thread threads[] { |
|||
std::thread(&Worker::do_work, dist(rng), std::ref(latch), "John"), |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Henry"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Smith"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Jane"}, |
|||
std::thread{&Worker::do_work, dist(rng), std::ref(latch), "Mary"}, |
|||
}; |
|||
for(auto& t: threads) t.join(); |
|||
std::cout << "Assembly is finished"; |
|||
}</syntaxhighlight> |
|||
{{out}} |
|||
<pre> |
|||
Worker Mary finished work |
|||
Worker Smith finished work |
|||
Worker John finished work |
|||
Worker Henry finished work |
|||
Worker Jane finished work |
|||
Worker Jane finished assembly |
|||
Worker Smith finished assembly |
|||
Worker Mary finished assembly |
|||
Worker Henry finished assembly |
|||
Worker John finished assembly |
|||
Assembly is finished |
|||
</pre> |
|||
=={{header|Clojure}}== |
=={{header|Clojure}}== |
||
With a fixed number of workers, this would be very straightforward in Clojure by using a ''CyclicBarrier'' from ''java.util.concurrent''. |
With a fixed number of workers, this would be very straightforward in Clojure by using a ''CyclicBarrier'' from ''java.util.concurrent''. |
||
So to make it interesting, this version supports workers dynamically joining and parting, and uses the new (2013) ''core.async'' library to use Go-like channels. |
So to make it interesting, this version supports workers dynamically joining and parting, and uses the new (2013) ''core.async'' library to use Go-like channels. |
||
Also, each worker passes a value to the checkpoint, so that some ''combine'' function could consume them once they're all received. |
Also, each worker passes a value to the checkpoint, so that some ''combine'' function could consume them once they're all received. |
||
<syntaxhighlight lang=clojure>(ns checkpoint.core |
<syntaxhighlight lang="clojure">(ns checkpoint.core |
||
(:gen-class) |
(:gen-class) |
||
(:require [clojure.core.async :as async :refer [go <! >! <!! >!! alts! close!]] |
(:require [clojure.core.async :as async :refer [go <! >! <!! >!! alts! close!]] |
||
Line 542: | Line 537: | ||
</syntaxhighlight> |
</syntaxhighlight> |
||
=={{header|D}}== |
=={{header|D}}== |
||
<syntaxhighlight lang=d>import std.stdio; |
<syntaxhighlight lang="d">import std.stdio; |
||
import std.parallelism: taskPool, defaultPoolThreads, totalCPUs; |
import std.parallelism: taskPool, defaultPoolThreads, totalCPUs; |
||
Line 582: | Line 576: | ||
Checkpoint reached. Assemble details ... |
Checkpoint reached. Assemble details ... |
||
Mechanism with 11 parts finished: 55</pre> |
Mechanism with 11 parts finished: 55</pre> |
||
=={{header|E}}== |
=={{header|E}}== |
||
Line 589: | Line 582: | ||
That said, here is an implementation of the task as stated. We start by defining a 'flag set' data structure (which is hopefully also useful for other problems), which allows us to express the checkpoint algorithm straightforwardly while being protected against the possibility of a task calling <code>deliver</code> or <code>leave</code> too many times. Note also that each task gets its own reference denoting its membership in the checkpoint group; thus it can only speak for itself and not break any global invariants. |
That said, here is an implementation of the task as stated. We start by defining a 'flag set' data structure (which is hopefully also useful for other problems), which allows us to express the checkpoint algorithm straightforwardly while being protected against the possibility of a task calling <code>deliver</code> or <code>leave</code> too many times. Note also that each task gets its own reference denoting its membership in the checkpoint group; thus it can only speak for itself and not break any global invariants. |
||
<syntaxhighlight lang=e>/** A flagSet solves this problem: There are N things, each in a true or false |
<syntaxhighlight lang="e">/** A flagSet solves this problem: There are N things, each in a true or false |
||
* state, and we want to know whether they are all true (or all false), and be |
* state, and we want to know whether they are all true (or all false), and be |
||
* able to bulk-change all of them, and all this without allowing double- |
* able to bulk-change all of them, and all this without allowing double- |
||
Line 703: | Line 696: | ||
} |
} |
||
interp.waitAtTop(promiseAllFulfilled(waits))</syntaxhighlight> |
interp.waitAtTop(promiseAllFulfilled(waits))</syntaxhighlight> |
||
=={{header|Erlang}}== |
=={{header|Erlang}}== |
||
A team of 5 workers assemble 3 items. The time it takes to assemble 1 item is 0 - 100 milliseconds. |
A team of 5 workers assemble 3 items. The time it takes to assemble 1 item is 0 - 100 milliseconds. |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="erlang"> |
||
-module( checkpoint_synchronization ). |
-module( checkpoint_synchronization ). |
||
Line 762: | Line 754: | ||
Worker 5 item 1 |
Worker 5 item 1 |
||
</pre> |
</pre> |
||
=={{header|FreeBASIC}}== |
=={{header|FreeBASIC}}== |
||
The library ontimer.bi was taken from the [https://www.freebasic.net/forum/viewtopic.php?f=7&t=23454 FreeBASIC forums]. |
The library ontimer.bi was taken from the [https://www.freebasic.net/forum/viewtopic.php?f=7&t=23454 FreeBASIC forums]. |
||
<syntaxhighlight lang=freebasic>#include "ontimer.bi" |
<syntaxhighlight lang="freebasic">#include "ontimer.bi" |
||
Randomize Timer |
Randomize Timer |
||
Line 851: | Line 841: | ||
Worker 1 ready and waiting |
Worker 1 ready and waiting |
||
Worker 3 ready and waiting</pre> |
Worker 3 ready and waiting</pre> |
||
=={{header|Go}}== |
=={{header|Go}}== |
||
'''Solution 1, WaitGroup''' |
'''Solution 1, WaitGroup''' |
||
Line 860: | Line 848: | ||
This first solution is a simple interpretation of the task, starting a goroutine (worker) for each part, letting the workers run concurrently, and waiting for them to all indicate completion. This is efficient and idiomatic in Go. |
This first solution is a simple interpretation of the task, starting a goroutine (worker) for each part, letting the workers run concurrently, and waiting for them to all indicate completion. This is efficient and idiomatic in Go. |
||
<syntaxhighlight lang=go>package main |
<syntaxhighlight lang="go">package main |
||
import ( |
import ( |
||
Line 935: | Line 923: | ||
Channels also synchronize, and in addition can send data. The solution shown here is very similar to the WaitGroup solution above but sends data on a channel to simulate a completed part. The channel operations provide synchronization and a WaitGroup is not needed. |
Channels also synchronize, and in addition can send data. The solution shown here is very similar to the WaitGroup solution above but sends data on a channel to simulate a completed part. The channel operations provide synchronization and a WaitGroup is not needed. |
||
<syntaxhighlight lang=go>package main |
<syntaxhighlight lang="go">package main |
||
import ( |
import ( |
||
Line 1,016: | Line 1,004: | ||
not justified. |
not justified. |
||
<syntaxhighlight lang=go>package main |
<syntaxhighlight lang="go">package main |
||
import ( |
import ( |
||
Line 1,126: | Line 1,114: | ||
This solution shows workers joining and leaving, although it is a rather different interpretation of the task. |
This solution shows workers joining and leaving, although it is a rather different interpretation of the task. |
||
<syntaxhighlight lang=go>package main |
<syntaxhighlight lang="go">package main |
||
import ( |
import ( |
||
Line 1,231: | Line 1,219: | ||
worker 6 leaves shop |
worker 6 leaves shop |
||
mechanism 5 completed</pre> |
mechanism 5 completed</pre> |
||
=={{header|Haskell}}== |
=={{header|Haskell}}== |
||
<p>Although it is not certain that this approach is correct, this example shows several workers performing a series of tasks simultaneously and synchronizing themselves before starting the next task.</p> |
<p>Although it is not certain that this approach is correct, this example shows several workers performing a series of tasks simultaneously and synchronizing themselves before starting the next task.</p> |
||
Line 1,247: | Line 1,234: | ||
<li>For effectful computations, you should use concurrent threads (forkIO and MVar from the module Control.Concurrent), software transactional memory (STM) or alternatives provided by other modules.</li> |
<li>For effectful computations, you should use concurrent threads (forkIO and MVar from the module Control.Concurrent), software transactional memory (STM) or alternatives provided by other modules.</li> |
||
</ul> |
</ul> |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="haskell">import Control.Parallel |
||
data Task a = Idle | Make a |
data Task a = Idle | Make a |
||
Line 1,339: | Line 1,326: | ||
<p>Other than the parallel version above, this code runs in the IO Monad and makes it possible to perform IO actions such as accessing the hardware. However, all actions must have the return type IO (). If the workers must return some useful values, the MVar should be extended with the necessary fields and the workers should use those fields to store the results they produce.</p> |
<p>Other than the parallel version above, this code runs in the IO Monad and makes it possible to perform IO actions such as accessing the hardware. However, all actions must have the return type IO (). If the workers must return some useful values, the MVar should be extended with the necessary fields and the workers should use those fields to store the results they produce.</p> |
||
<p>Note: This code has been tested on GHC 7.6.1 and will most probably not run under other Haskell implementations due to the use of some functions from the module Control.Concurrent. It won't work if compiled with the -O2 compiler switch. Compile with the -threaded compiler switch if you want to run the threads in parallel.</p> |
<p>Note: This code has been tested on GHC 7.6.1 and will most probably not run under other Haskell implementations due to the use of some functions from the module Control.Concurrent. It won't work if compiled with the -O2 compiler switch. Compile with the -threaded compiler switch if you want to run the threads in parallel.</p> |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="haskell">import Control.Concurrent |
||
import Control.Monad -- needed for "forM", "forM_" |
import Control.Monad -- needed for "forM", "forM_" |
||
Line 1,545: | Line 1,532: | ||
The following only works in Unicon: |
The following only works in Unicon: |
||
<syntaxhighlight lang=unicon>global nWorkers, workers, cv |
<syntaxhighlight lang="unicon">global nWorkers, workers, cv |
||
procedure main(A) |
procedure main(A) |
||
Line 1,600: | Line 1,587: | ||
-> |
-> |
||
</pre> |
</pre> |
||
=={{header|J}}== |
=={{header|J}}== |
||
Line 1,607: | Line 1,593: | ||
For example: |
For example: |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="j"> {{for. y do. 0 T.'' end.}} 0>.4-1 T.'' NB. make sure we have some threads |
||
ts=: 6!:0 NB. timestamp |
ts=: 6!:0 NB. timestamp |
||
dl=: 6!:3 NB. delay |
dl=: 6!:3 NB. delay |
||
Line 1,621: | Line 1,607: | ||
Here, we had set up a loop which periodically tracked the time, and waited a second each time through the loop, and repeated the loop a number of times specified at task startup. We ran two tasks, to demonstrate that they were running side-by-side. |
Here, we had set up a loop which periodically tracked the time, and waited a second each time through the loop, and repeated the loop a number of times specified at task startup. We ran two tasks, to demonstrate that they were running side-by-side. |
||
=={{header|Java}}== |
=={{header|Java}}== |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="java">import java.util.Scanner; |
||
import java.util.Random; |
import java.util.Random; |
||
Line 1,751: | Line 1,736: | ||
</pre> |
</pre> |
||
{{works with|Java|1.5+}} |
{{works with|Java|1.5+}} |
||
<syntaxhighlight lang=java5>import java.util.Random; |
<syntaxhighlight lang="java5">import java.util.Random; |
||
import java.util.concurrent.CountDownLatch; |
import java.util.concurrent.CountDownLatch; |
||
Line 1,842: | Line 1,827: | ||
Worker 2 is ready |
Worker 2 is ready |
||
Task 3 complete</pre> |
Task 3 complete</pre> |
||
=={{header|Julia}}== |
=={{header|Julia}}== |
||
Julia has specific macros for checkpoint type synchronization. @async starts an asynchronous task, and multiple @async tasks can be synchronized by wrapping them within the @sync macro statement, which creates a checkpoint for all @async tasks. |
Julia has specific macros for checkpoint type synchronization. @async starts an asynchronous task, and multiple @async tasks can be synchronized by wrapping them within the @sync macro statement, which creates a checkpoint for all @async tasks. |
||
<syntaxhighlight lang=julia> |
<syntaxhighlight lang="julia"> |
||
function runsim(numworkers, runs) |
function runsim(numworkers, runs) |
||
for count in 1:runs |
for count in 1:runs |
||
Line 1,951: | Line 1,935: | ||
Finished all runs. |
Finished all runs. |
||
</pre> |
</pre> |
||
=={{header|Kotlin}}== |
=={{header|Kotlin}}== |
||
{{trans|Java}} |
{{trans|Java}} |
||
<syntaxhighlight lang=scala>// Version 1.2.41 |
<syntaxhighlight lang="scala">// Version 1.2.41 |
||
import java.util.Random |
import java.util.Random |
||
Line 2,057: | Line 2,040: | ||
Worker 5 is ready |
Worker 5 is ready |
||
</pre> |
</pre> |
||
=={{header|Logtalk}}== |
=={{header|Logtalk}}== |
||
The following example can be found in the Logtalk distribution and is used here with permission. It's based on the Erlang solution for this task. Works when using SWI-Prolog, XSB, or YAP as the backend compiler. |
The following example can be found in the Logtalk distribution and is used here with permission. It's based on the Erlang solution for this task. Works when using SWI-Prolog, XSB, or YAP as the backend compiler. |
||
<syntaxhighlight lang=logtalk> |
<syntaxhighlight lang="logtalk"> |
||
:- object(checkpoint). |
:- object(checkpoint). |
||
Line 2,128: | Line 2,110: | ||
</syntaxhighlight> |
</syntaxhighlight> |
||
Output: |
Output: |
||
<syntaxhighlight lang=text> |
<syntaxhighlight lang="text"> |
||
| ?- checkpoint::run. |
| ?- checkpoint::run. |
||
Worker 1 item 3 |
Worker 1 item 3 |
||
Line 2,151: | Line 2,133: | ||
yes |
yes |
||
</syntaxhighlight> |
</syntaxhighlight> |
||
=={{header|Nim}}== |
=={{header|Nim}}== |
||
As in Oforth, the checkpoint is a thread (the main thread) and synchronization is done using channels: |
As in Oforth, the checkpoint is a thread (the main thread) and synchronization is done using channels: |
||
Line 2,158: | Line 2,139: | ||
Working on a task is simulated by sleeping during some time (randomly chosen). |
Working on a task is simulated by sleeping during some time (randomly chosen). |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="nim">import locks |
||
import os |
import os |
||
import random |
import random |
||
Line 2,263: | Line 2,244: | ||
Sending stop order to workers. |
Sending stop order to workers. |
||
All workers stopped.</pre> |
All workers stopped.</pre> |
||
=={{header|Oforth}}== |
=={{header|Oforth}}== |
||
Checkpoint is implemented as a task. It : |
Checkpoint is implemented as a task. It : |
||
Line 2,279: | Line 2,259: | ||
- And waits for $allDone checkpoint return on its personal channel. |
- And waits for $allDone checkpoint return on its personal channel. |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="oforth">: task(n, jobs, myChannel) |
||
while(true) [ |
while(true) [ |
||
System.Out "TASK " << n << " : Beginning my work..." << cr |
System.Out "TASK " << n << " : Beginning my work..." << cr |
||
Line 2,302: | Line 2,282: | ||
#[ checkPoint(n, jobs, channels) ] & |
#[ checkPoint(n, jobs, channels) ] & |
||
n loop: i [ #[ task(i, jobs, channels at(i)) ] & ] ;</syntaxhighlight> |
n loop: i [ #[ task(i, jobs, channels at(i)) ] & ] ;</syntaxhighlight> |
||
=={{header|Perl}}== |
=={{header|Perl}}== |
||
The perlipc man page details several approaches to interprocess communication. Here's one of my favourites: socketpair and fork. I've omitted some error-checking for brevity. |
The perlipc man page details several approaches to interprocess communication. Here's one of my favourites: socketpair and fork. I've omitted some error-checking for brevity. |
||
<syntaxhighlight lang=perl>#!/usr/bin/perl |
<syntaxhighlight lang="perl">#!/usr/bin/perl |
||
use warnings; |
use warnings; |
||
use strict; |
use strict; |
||
Line 2,385: | Line 2,364: | ||
msl@64Lucid:~/perl$ |
msl@64Lucid:~/perl$ |
||
</pre> |
</pre> |
||
=={{header|Phix}}== |
=={{header|Phix}}== |
||
Simple multitasking solution: no locking required, no race condition possible, supports workers leaving and joining. |
Simple multitasking solution: no locking required, no race condition possible, supports workers leaving and joining. |
||
<!--<syntaxhighlight lang= |
<!--<syntaxhighlight lang="phix">(notonline)--> |
||
<span style="color: #000080;font-style:italic;">-- demo\rosetta\checkpoint_synchronisation.exw</span> |
<span style="color: #000080;font-style:italic;">-- demo\rosetta\checkpoint_synchronisation.exw</span> |
||
<span style="color: #008080;">without</span> <span style="color: #008080;">js</span> <span style="color: #000080;font-style:italic;">-- task_xxx(), get_key()</span> |
<span style="color: #008080;">without</span> <span style="color: #008080;">js</span> <span style="color: #000080;font-style:italic;">-- task_xxx(), get_key()</span> |
||
Line 2,519: | Line 2,497: | ||
worker B leaves |
worker B leaves |
||
</pre> |
</pre> |
||
=={{header|PicoLisp}}== |
=={{header|PicoLisp}}== |
||
The following solution implements each worker as a coroutine. Therefore, it |
The following solution implements each worker as a coroutine. Therefore, it |
||
Line 2,531: | Line 2,508: | ||
'worker' takes a number of steps to perform. It "works" by printing each step, |
'worker' takes a number of steps to perform. It "works" by printing each step, |
||
and returning NIL when done. |
and returning NIL when done. |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="picolisp">(de checkpoints (Projects Workers) |
||
(for P Projects |
(for P Projects |
||
(prinl "Starting project number " P ":") |
(prinl "Starting project number " P ":") |
||
Line 2,581: | Line 2,558: | ||
Worker 1 step 4 |
Worker 1 step 4 |
||
Project number 2 is done.</pre> |
Project number 2 is done.</pre> |
||
=={{header|PureBasic}}== |
=={{header|PureBasic}}== |
||
PureBasic normally uses semaphores and mutexes to synchronize parallel systems. This system relies only on semaphores between each thread and the controller (CheckPoint procedure). For exchanging data, a mutex-based message stack could easily be added, either synchronized according to this specific task or non-blocking if each worker could be allowed that freedom. |
PureBasic normally uses semaphores and mutexes to synchronize parallel systems. This system relies only on semaphores between each thread and the controller (CheckPoint procedure). For exchanging data, a mutex-based message stack could easily be added, either synchronized according to this specific task or non-blocking if each worker could be allowed that freedom. |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="purebasic">#MaxWorktime=8000 ; "Workday" in msec |
||
; Structure that each thread uses |
; Structure that each thread uses |
||
Line 2,794: | Line 2,770: | ||
Thread #1 is done. |
Thread #1 is done. |
||
Press ENTER to exit</pre> |
Press ENTER to exit</pre> |
||
=={{header|Python}}== |
=={{header|Python}}== |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="python"> |
||
""" |
""" |
||
Line 2,846: | Line 2,821: | ||
Exiting worker2 |
Exiting worker2 |
||
</pre> |
</pre> |
||
=={{header|Racket}}== |
=={{header|Racket}}== |
||
This solution uses a double barrier to synchronize the five threads. |
This solution uses a double barrier to synchronize the five threads. |
||
The method can be found on page 41 of the delightful book |
The method can be found on page 41 of the delightful book |
||
[http://greenteapress.com/semaphores/downey08semaphores.pdf "The Little Book of Semaphores"] by Allen B. Downey. |
[http://greenteapress.com/semaphores/downey08semaphores.pdf "The Little Book of Semaphores"] by Allen B. Downey. |
||
<syntaxhighlight lang=racket> |
<syntaxhighlight lang="racket"> |
||
#lang racket |
#lang racket |
||
(define t 5) ; total number of threads |
(define t 5) ; total number of threads |
||
Line 2,898: | Line 2,872: | ||
</syntaxhighlight> |
</syntaxhighlight> |
||
Output: |
Output: |
||
<syntaxhighlight lang=racket> |
<syntaxhighlight lang="racket"> |
||
(1 4 2 0 3) |
(1 4 2 0 3) |
||
(6 9 7 8 5) |
(6 9 7 8 5) |
||
Line 2,921: | Line 2,895: | ||
... |
... |
||
</syntaxhighlight> |
</syntaxhighlight> |
||
=={{header|Raku}}== |
=={{header|Raku}}== |
||
(formerly Perl 6) |
(formerly Perl 6) |
||
<syntaxhighlight lang=raku line>my $TotalWorkers = 3; |
<syntaxhighlight lang="raku" line>my $TotalWorkers = 3; |
||
my $BatchToRun = 3; |
my $BatchToRun = 3; |
||
my @TimeTaken = (5..15); # in seconds |
my @TimeTaken = (5..15); # in seconds |
||
Line 2,984: | Line 2,957: | ||
>>>>> batch 2 completed. |
>>>>> batch 2 completed. |
||
</pre> |
</pre> |
||
=={{header|Ruby}}== |
=={{header|Ruby}}== |
||
{{needs-review|Ruby|This code might or might not do the correct task. See comment at [[Talk:{{PAGENAME}}]].}} |
{{needs-review|Ruby|This code might or might not do the correct task. See comment at [[Talk:{{PAGENAME}}]].}} |
||
<syntaxhighlight lang=ruby>require 'socket' |
<syntaxhighlight lang="ruby">require 'socket' |
||
# A Workshop runs all of its workers, then collects their results. Use |
# A Workshop runs all of its workers, then collects their results. Use |
||
Line 3,161: | Line 3,133: | ||
4494=>[5, 4, 1166220]} |
4494=>[5, 4, 1166220]} |
||
{}</pre> |
{}</pre> |
||
=={{header|Rust}}== |
=={{header|Rust}}== |
||
<syntaxhighlight lang=rust> |
<syntaxhighlight lang="rust"> |
||
//! We implement this task using Rust's Barriers. Barriers are simply thread synchronization |
//! We implement this task using Rust's Barriers. Barriers are simply thread synchronization |
||
//! points--if a task waits at a barrier, it will not continue until the number of tasks for which |
//! points--if a task waits at a barrier, it will not continue until the number of tasks for which |
||
Line 3,230: | Line 3,201: | ||
} |
} |
||
</syntaxhighlight> |
</syntaxhighlight> |
||
=={{header|Scala}}== |
=={{header|Scala}}== |
||
<syntaxhighlight lang= |
<syntaxhighlight lang="scala">import java.util.{Random, Scanner} |
||
object CheckpointSync extends App { |
object CheckpointSync extends App { |
||
Line 3,317: | Line 3,286: | ||
}</syntaxhighlight> |
}</syntaxhighlight> |
||
=={{header|Tcl}}== |
=={{header|Tcl}}== |
||
This implementation works by having a separate thread handle the synchronization (inter-thread message delivery already being serialized). The alternative, using a read-write mutex, is more complex and more likely to run into trouble with multi-core machines. |
This implementation works by having a separate thread handle the synchronization (inter-thread message delivery already being serialized). The alternative, using a read-write mutex, is more complex and more likely to run into trouble with multi-core machines. |
||
<syntaxhighlight lang=tcl>package require Tcl 8.5 |
<syntaxhighlight lang="tcl">package require Tcl 8.5 |
||
package require Thread |
package require Thread |
||
Line 3,419: | Line 3,387: | ||
Demonstration of how this works. |
Demonstration of how this works. |
||
{{trans|Ada}} |
{{trans|Ada}} |
||
<syntaxhighlight lang=tcl># Build the workers |
<syntaxhighlight lang="tcl"># Build the workers |
||
foreach worker {A B C D} { |
foreach worker {A B C D} { |
||
dict set ids $worker [checkpoint makeThread { |
dict set ids $worker [checkpoint makeThread { |
||
Line 3,491: | Line 3,459: | ||
B is ready |
B is ready |
||
D is ready</pre> |
D is ready</pre> |
||
=={{header|Wren}}== |
=={{header|Wren}}== |
||
{{trans|Kotlin}} |
{{trans|Kotlin}} |
||
{{libheader|Wren-ioutil}} |
{{libheader|Wren-ioutil}} |
||
<syntaxhighlight lang=ecmascript>import "random" for Random |
<syntaxhighlight lang="ecmascript">import "random" for Random |
||
import "scheduler" for Scheduler |
import "scheduler" for Scheduler |
||
import "timer" for Timer |
import "timer" for Timer |
||
Line 3,579: | Line 3,546: | ||
Worker 5 is ready. |
Worker 5 is ready. |
||
</pre> |
</pre> |
||
=={{header|zkl}}== |
=={{header|zkl}}== |
||
Simulate a pool of workers, each making one part, waiting for the part to be requested and then putting the part on a conveyor belt to be sent to the station that assembles all parts into a product. After shipping the part, it turns off the request flag. |
Simulate a pool of workers, each making one part, waiting for the part to be requested and then putting the part on a conveyor belt to be sent to the station that assembles all parts into a product. After shipping the part, it turns off the request flag. |
||
The consumer requests a part it doesn't have, waits for a part and puts the received part (which might not be the requested one (if buggy code)) in a bin and assembles the parts into a product. |
The consumer requests a part it doesn't have, waits for a part and puts the received part (which might not be the requested one (if buggy code)) in a bin and assembles the parts into a product. |
||
Repeat until all requested products are made. |
Repeat until all requested products are made. |
||
<syntaxhighlight lang=zkl>const NUM_PARTS=5; // number of parts used to make the product |
<syntaxhighlight lang="zkl">const NUM_PARTS=5; // number of parts used to make the product |
||
var requested=Atomic.Int(-1); // the id of the part the consumer needs |
var requested=Atomic.Int(-1); // the id of the part the consumer needs |
||
var pipe=Thread.Pipe(); // "conveyor belt" of parts to consumer |
var pipe=Thread.Pipe(); // "conveyor belt" of parts to consumer |
||
Line 3,627: | Line 3,593: | ||
Done |
Done |
||
</pre> |
</pre> |
||
{{omit from|Axe}} |
|||
{{omit from|Maxima}} |
|||
{{omit from|ML/I}} |
{{omit from|ML/I}} |
||
{{omit from|Maxima}} |
|||
{{omit from|Axe}} |