Tokenize a string: Difference between revisions

m
→‎{{header|Wren}}: Changed to Wren S/H
m (→‎{{header|Wren}}: Changed to Wren S/H)
 
(47 intermediate revisions by 29 users not shown)
Line 15:
{{trans|Python}}
 
<langsyntaxhighlight lang="11l">V text = ‘Hello,How,Are,You,Today’
V tokens = text.split(‘,’)
print(tokens.join(‘.’))</langsyntaxhighlight>
 
{{out}}
Line 25:
 
=={{header|360 Assembly}}==
<langsyntaxhighlight lang="360asm">* Tokenize a string - 08/06/2018
TOKSTR CSECT
USING TOKSTR,R13 base register
Line 107:
PG DC CL80' ' buffer
YREGS
END TOKSTR</langsyntaxhighlight>
{{out}}
<pre>
Line 114:
 
=={{header|8080 Assembly}}==
<langsyntaxhighlight lang="8080asm">puts: equ 9
org 100h
jmp demo
Line 161:
period: db '. $'
hello: db 'Hello,How,Are,You,Today$'
parts: equ $</langsyntaxhighlight>
{{out}}
<pre>Hello. How. Are. You. Today.</pre>
=={{header|8086 Assembly}}==
<langsyntaxhighlight lang="asm"> cpu 8086
org 100h
section .text
Line 203:
hello: db 'Hello,How,Are,You,Today$'
section .bss
parts: resw 10</langsyntaxhighlight>
{{out}}
<pre>Hello. How. Are. You. Today. </pre>
Line 209:
=={{header|AArch64 Assembly}}==
{{works with|as|Raspberry Pi 3B version Buster 64 bits}}
<syntaxhighlight lang="aarch64 assembly">
/* ARM assembly AARCH64 Raspberry PI 3B */
/* program strTokenize64.s */
Line 349:
/* for this file see task include a file in language AArch64 assembly */
.include "../includeARM64.inc"
</syntaxhighlight>
{{Output}}
<pre>
Line 361:
 
=={{header|ACL2}}==
<langsyntaxhighlight lang="lisp">(defun split-at (xs delim)
(if (or (endp xs) (eql (first xs) delim))
(mv nil (rest xs))
Line 389:
(progn$ (cw (first strs))
(cw (coerce (list delim) 'string))
(print-with (rest strs) delim))))</langsyntaxhighlight>
 
{{out}}
<pre>&gt; (print-with (split-str "Hello,How,Are,You,Today" #\,) #\.)
Hello.How.Are.You.Today.</pre>
 
=={{header|Action!}}==
The user must type in the monitor the following command after compilation and before running the program!<pre>SET EndProg=*</pre>
{{libheader|Action! Tool Kit}}
<syntaxhighlight lang="action!">CARD EndProg ;required for ALLOCATE.ACT
 
INCLUDE "D2:ALLOCATE.ACT" ;from the Action! Tool Kit. You must type 'SET EndProg=*' from the monitor after compiling, but before running this program!
 
DEFINE PTR="CARD"
 
BYTE FUNC Split(CHAR ARRAY s CHAR c PTR ARRAY items)
BYTE i,count,start,len
CHAR ARRAY item
 
IF s(0)=0 THEN RETURN (0) FI
 
i=1 count=0
WHILE i<s(0)
DO
start=i
WHILE i<=s(0) AND s(i)#c
DO
i==+1
OD
len=i-start
item=Alloc(len+1)
SCopyS(item,s,start,i-1)
items(count)=item
count==+1
i==+1
OD
RETURN (count)
 
PROC Join(PTR ARRAY items BYTE count CHAR c CHAR ARRAY s)
BYTE i,pos
CHAR POINTER srcPtr,dstPtr
CHAR ARRAY item
 
s(0)=0
IF count=0 THEN RETURN FI
 
pos=1
FOR i=0 TO count-1
DO
item=items(i)
srcPtr=item+1
dstPtr=s+pos
MoveBlock(dstPtr,srcPtr,item(0))
pos==+item(0)
IF i<count-1 THEN
s(pos)='.
pos==+1
FI
OD
s(0)=pos-1
RETURN
 
PROC Clear(PTR ARRAY items BYTE POINTER count)
BYTE i
CHAR ARRAY item
 
IF count^=0 THEN RETURN FI
 
FOR i=0 TO count^-1
DO
item=items(i)
Free(item,item(0)+1)
OD
count^=0
RETURN
 
PROC Main()
CHAR ARRAY s="Hello,How,Are,You,Today"
CHAR ARRAY r(256)
PTR ARRAY items(100)
BYTE i,count
 
Put(125) PutE() ;clear screen
AllocInit(0)
count=Split(s,',,items)
Join(items,count,'.,r)
 
PrintF("Input:%E""%S""%E%E",s)
PrintE("Split:")
FOR i=0 TO count-1
DO
PrintF("""%S""",items(i))
IF i<count-1 THEN
Print(", ")
ELSE
PutE() PutE()
FI
OD
PrintF("Join:%E""%S""%E",r)
Clear(items,@count)
RETURN</syntaxhighlight>
{{out}}
[https://gitlab.com/amarok8bit/action-rosetta-code/-/raw/master/images/Tokenize_a_string.png Screenshot from Atari 8-bit computer]
<pre>
Input:
"Hello,How,Are,You,Today"
 
Split:
"Hello", "How", "Are", "You", "Today"
 
Join:
"Hello.How.Are.You.Today"
</pre>
 
=={{header|ActionScript}}==
<langsyntaxhighlight lang="actionscript">var hello:String = "Hello,How,Are,You,Today";
var tokens:Array = hello.split(",");
trace(tokens.join("."));
 
// Or as a one-liner
trace("Hello,How,Are,You,Today".split(",").join("."));</langsyntaxhighlight>
 
=={{header|Ada}}==
<langsyntaxhighlight lang="ada">with Ada.Text_IO, Ada.Containers.Indefinite_Vectors, Ada.Strings.Fixed, Ada.Strings.Maps;
use Ada.Text_IO, Ada.Containers, Ada.Strings, Ada.Strings.Fixed, Ada.Strings.Maps;
 
Line 424 ⟶ 534:
Put (S & ".");
end loop;
end Tokenize;</langsyntaxhighlight>
 
=={{header|ALGOL 68}}==
<langsyntaxhighlight lang="algol68">main:(
 
OP +:= = (REF FLEX[]STRING in out, STRING item)VOID:(
Line 475 ⟶ 585:
printf(($g"."$, string split(beetles, ", "),$l$));
printf(($g"."$, char split(beetles, ", "),$l$))
)</langsyntaxhighlight>
{{out}}
<pre>
John Lennon.Paul McCartney.George Harrison.Ringo Starr.
John.Lennon..Paul.McCartney..George.Harrison..Ringo.Starr.
</pre>
 
=={{header|Amazing Hopper}}==
 
Hopper provides instructions for separating and modifying tokens from a string.
Let "s" be a string; "n" token number:
 
1) {n}, $(s) ==> gets token "n" from string "s".
 
2) {"word", n} $$(s) ==> replace token "n" of "s", with "word".
 
Note: the "splitnumber" macro cannot separate a number converted to a string by the "XTOSTR" function, because this function "rounds" the number to the decimal position by default.
 
<syntaxhighlight lang="hopper">
#include <hopper.h>
 
#proto splitdate(_DATETIME_)
#proto splitnumber(_N_)
#proto split(_S_,_T_)
 
main:
s="this string will be separated into parts with space token separator"
aS=0,let( aS :=_split(s," "))
{","}toksep // set a new token separator
{"String: ",s}
{"\nArray:\n",aS},
{"\nSize="}size(aS),println // "size" return an array: {dims,#rows,#cols,#pages}
{"\nOriginal number: ",-125.489922},println
w=0,let(w:=_split number(-125.489922) )
{"Integer part: "}[1]get(w) // get first element from array "w"
{"\nDecimal part: "}[2]get(w),println // get second element from array "w"
{"\nDate by DATENOW(TODAY) macro: "},print
dt=0, let( dt :=_splitdate(datenow(TODAY);!puts)) // "!" keep first element from stack
{"\nDate: "}[1]get(dt)
{"\nTime: "}[2]get(dt),println
 
exit(0)
 
.locals
splitdate(_DATETIME_)
_SEP_=0,gettoksep,mov(_SEP_) // "gettoksep" return actual token separator
{","}toksep, // set a new token separator
_NEWARRAY_={}
{1},$( _DATETIME_ ),
{2},$( _DATETIME_ ),pushall(_NEWARRAY_)
{_SEP_}toksep // restore ols token separator
{_NEWARRAY_}
back
 
splitnumber(_X_)
part_int=0,part_dec=0,
{_X_},!trunc,mov(part_int),
minus(part_int), !sign,mul
xtostr,mov(part_dec), part_dec+=2, // "part_dec+=2", delete "0." from "part_dec"
{part_dec}xtonum,mov(part_dec)
_NEWARRAY_={},{part_int,part_dec},pushall(_NEWARRAY_)
{_NEWARRAY_}
back
 
split(_S_,_T_)
_NEWARRAY_={},_VAR1_=0,_SEP_=0,gettoksep,mov(_SEP_)
{_T_}toksep,totaltoken(_S_),
mov(_VAR1_), // for total tokens
_VAR2_=1, // for real position of tokens into the string
___SPLIT_ITER:
{_VAR2_}$( _S_ ),push(_NEWARRAY_)
++_VAR2_,--_VAR1_
{ _VAR1_ },jnz(___SPLIT_ITER) // jump to "___SPLIT_ITER" if "_VAR1_" is not zero.
clear(_VAR2_),clear(_VAR1_)
{_SEP_}toksep
{_NEWARRAY_}
back
 
</syntaxhighlight>
{{Out}}
<pre>Output:
 
String: this string will be separated into parts with space token separator
Array:
this,string,will,be,separated,into,parts,with,space,token,separator
Size=1,11
 
Original number: -125.49
Integer part: -125
Decimal part: 489922
 
Date by DATENOW(TODAY) macro: 22/11/2021,18:41:20:13
Date: 22/11/2021
Time: 18:41:20:13
 
</pre>
 
=={{header|APL}}==
<syntaxhighlight lang="apl"> '.',⍨¨ ','(≠⊆⊢)'abc,123,X' ⍝ [1] Do the split: ','(≠⊆⊢)'abc,123,X'; [2] append the periods: '.',⍨¨
 abc. 123. X. ⍝ 3 strings (char vectors), each with a period at the end.
</syntaxhighlight>
 
=={{header|AppleScript}}==
 
<langsyntaxhighlight lang="applescript">on run
intercalate(".", splitOn(",", "Hello,How,Are,You,Today"))
end run
Line 508 ⟶ 713:
set my text item delimiters to dlm
return strJoined
end intercalate</langsyntaxhighlight>
{{Out}}
<pre>Hello.How.Are.You.Today</pre>
Line 514 ⟶ 719:
Or,
 
<langsyntaxhighlight AppleScriptlang="applescript">set my text item delimiters to ","
set tokens to the text items of "Hello,How,Are,You,Today"
 
set my text item delimiters to "."
log tokens as text</langsyntaxhighlight>
 
{{Out}}
Line 526 ⟶ 731:
=={{header|ARM Assembly}}==
{{works with|as|Raspberry Pi}}
<syntaxhighlight lang="arm assembly">
 
/* ARM assembly Raspberry PI */
Line 690 ⟶ 895:
bx lr
</syntaxhighlight>
 
=={{header|Arturo}}==
<langsyntaxhighlight lang="rebol">str: "Hello,How,Are,You,Today"
 
print join.with:"." split.by:"," str</langsyntaxhighlight>
 
{{out}}
Line 702 ⟶ 907:
 
=={{header|Astro}}==
<langsyntaxhighlight lang="python">let text = 'Hello,How,Are,You,Today'
let tokens = text.split(||,||)
print tokens.join(with: '.')</langsyntaxhighlight>
 
=={{header|AutoHotkey}}==
<langsyntaxhighlight AutoHotkeylang="autohotkey">string := "Hello,How,Are,You,Today"
stringsplit, string, string, `,
loop, % string0
{
msgbox % string%A_Index%
}</langsyntaxhighlight>
 
=={{header|AWK}}==
 
<langsyntaxhighlight lang="awk">BEGIN {
s = "Hello,How,Are,You,Today"
split(s, arr, ",")
Line 723 ⟶ 928:
}
print
}</langsyntaxhighlight>
 
A more ''idiomatic'' way for AWK is
 
<langsyntaxhighlight lang="awk">BEGIN { FS = "," }
{
for(i=1; i <= NF; i++) printf $i ".";
print ""
}</langsyntaxhighlight>
 
which "tokenize" each line of input and this is achieved by using "," as field separator
Line 737 ⟶ 942:
=={{header|BASIC}}==
==={{header|Applesoft BASIC}}===
<langsyntaxhighlight ApplesoftBasiclang="applesoftbasic">100 T$ = "HELLO,HOW,ARE,YOU,TODAY"
110 GOSUB 200"TOKENIZE
120 FOR I = 1 TO N
Line 756 ⟶ 961:
290 A$(N) = A$(N) + C$
300 NEXT TI
310 RETURN</langsyntaxhighlight>
 
==={{header|BaCon}}===
BaCon includes extensive support for ''delimited strings''.
<langsyntaxhighlight lang="bacon">OPTION BASE 1
 
string$ = "Hello,How,Are,You,Today"
Line 771 ⟶ 976:
 
' Or simply replace the delimiter
PRINT DELIM$(string$, ",", ".")</langsyntaxhighlight>
 
{{out}}
Line 777 ⟶ 982:
Hello.How.Are.You.Today
Hello.How.Are.You.Today</pre>
 
==={{header|BASIC256}}===
<syntaxhighlight lang="basic256">instring$ = "Hello,How,Are,You,Today"
 
tokens$ = explode(instring$,",")
for i = 0 to tokens$[?]-1
print tokens$[i]; ".";
next i
end</syntaxhighlight>
 
 
==={{header|BBC BASIC}}===
{{works with|BBC BASIC for Windows}}
<langsyntaxhighlight lang="bbcbasic"> INSTALL @lib$+"STRINGLIB"
text$ = "Hello,How,Are,You,Today"
Line 787 ⟶ 1,002:
PRINT array$(i%) "." ;
NEXT
PRINT</langsyntaxhighlight>
 
==={{header|Chipmunk Basic}}===
Solutions [[#Applesoft BASIC|Applesoft BASIC]] and [[#Commodore BASIC|Commodore BASIC]] work without changes.
 
==={{header|Commodore BASIC}}===
Based on the AppleSoft BASIC version.
<syntaxhighlight lang="commodorebasic">10 REM TOKENIZE A STRING ... ROSETTACODE.ORG
20 T$ = "HELLO,HOW,ARE,YOU,TODAY"
30 GOSUB 200, TOKENIZE
Line 808 ⟶ 1,025:
260 N = N + 1
270 NEXT L
280 RETURN</syntaxhighlight>
==={{header|FreeBASIC}}===
<langsyntaxhighlight lang="freebasic">sub tokenize( instring as string, tokens() as string, sep as string )
redim tokens(0 to 0) as string
dim as string*1 ch
Line 832 ⟶ 1,049:
for i as uinteger = 0 to ubound(tokens)
print tokens(i);".";
next i</langsyntaxhighlight>
 
==={{header|Liberty BASIC}}===
<langsyntaxhighlight lang="lb">'Note that Liberty Basic's array usage can reach element #10 before having to DIM the array
For i = 0 To 4
array$(i) = Word$("Hello,How,Are,You,Today", (i + 1), ",")
Line 841 ⟶ 1,058:
Next i
 
Print Left$(array$, (Len(array$) - 1))</langsyntaxhighlight>
 
==={{header|PowerBASICMSX Basic}}===
The [[#Commodore BASIC|Commodore BASIC]] solution works without any changes.
 
==={{header|PowerBASIC}}===
PowerBASIC has a few keywords that make parsing strings trivial: <code>PARSE</code>, <code>PARSE$</code>, and <code>PARSECOUNT</code>. (<code>PARSE$</code>, not shown here, is for extracting tokens one at a time, while <code>PARSE</code> extracts all tokens at once into an array. <code>PARSECOUNT</code> returns the number of tokens found.)
 
<langsyntaxhighlight lang="powerbasic">FUNCTION PBMAIN () AS LONG
DIM parseMe AS STRING
parseMe = "Hello,How,Are,You,Today"
Line 861 ⟶ 1,080:
 
MSGBOX outP
END FUNCTION</langsyntaxhighlight>
 
==={{header|PureBasic}}===
 
'''As described
<langsyntaxhighlight PureBasiclang="purebasic">NewList MyStrings.s()
 
For i=1 To 5
Line 875 ⟶ 1,094:
ForEach MyStrings()
Print(MyStrings()+".")
Next</langsyntaxhighlight>
 
'''Still, easier would be
<langsyntaxhighlight PureBasiclang="purebasic">Print(ReplaceString("Hello,How,Are,You,Today",",","."))</langsyntaxhighlight>
 
==={{header|QBasic}}===
<langsyntaxhighlight lang="qbasic">DIM parseMe AS STRING
parseMe = "Hello,How,Are,You,Today"
 
Line 925 ⟶ 1,144:
PRINT "."; parsed(L0);
NEXT
END IF</langsyntaxhighlight>
 
==={{header|Run BASIC}}===
<langsyntaxhighlight lang="runbasic">text$ = "Hello,How,Are,You,Today"
FOR i = 1 to 5
textArray$(i) = word$(text$,i,",")
print textArray$(i);" ";
NEXT</langsyntaxhighlight>
 
==={{header|VBScript}}===
====One liner====
<langsyntaxhighlight lang="vb">WScript.Echo Join(Split("Hello,How,Are,You,Today", ","), ".")</langsyntaxhighlight>
 
In fact, the Visual Basic solution (below) could have done the same, as Join() is available.
Line 945 ⟶ 1,164:
Unlike PowerBASIC, there is no need to know beforehand how many tokens are in the string -- <code>Split</code> automagically builds the array for you.
 
<langsyntaxhighlight lang="vb">Sub Main()
Dim parseMe As String, parsed As Variant
parseMe = "Hello,How,Are,You,Today"
Line 958 ⟶ 1,177:
 
MsgBox outP
End Sub</langsyntaxhighlight>
 
=={{header|Batch File}}==
<langsyntaxhighlight lang="dos">@echo off
setlocal enabledelayedexpansion
call :tokenize %1 res
Line 972 ⟶ 1,191:
for %%i in (%str%) do set %2=!%2!.%%i
set %2=!%2:~1!
goto :eof</langsyntaxhighlight>
 
''Demo''
>tokenize.cmd "Hello,How,Are,You,Today"
Hello.How.Are.You.Today
 
=={{header|BQN}}==
Uses a splitting idiom from bqncrate.
<syntaxhighlight lang="bqn">Split ← (+`׬)⊸-∘= ⊔ ⊢
 
∾⟜'.'⊸∾´ ',' Split "Hello,How,Are,You,Today"</syntaxhighlight>
{{out}}
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Bracmat}}==
Solution that employs string pattern matching to spot the commas
<langsyntaxhighlight lang="bracmat">( "Hello,How,Are,You,Today":?String
& :?ReverseList
& whl
Line 992 ⟶ 1,219:
)
& out$!List
)</langsyntaxhighlight>
Solution that starts by evaluating the input and employs the circumstance that the comma is a list constructing binary operator and that the string does not contain any other characters that are interpreted as operators on evaluation.
<langsyntaxhighlight lang="bracmat">( get$("Hello,How,Are,You,Today",MEM):?CommaseparatedList
& :?ReverseList
& whl
Line 1,006 ⟶ 1,233:
)
& out$!List
)</langsyntaxhighlight>
 
=={{header|C}}==
Line 1,015 ⟶ 1,242:
This example uses the ''strtok()'' function to separate the tokens. This function is destructive (replacing token separators with '\0'), so we have to make a copy of the string (using ''strdup()'') before tokenizing. ''strdup()'' is not part of [[ANSI C]], but is available on most platforms. It can easily be implemented with a combination of ''strlen()'', ''malloc()'', and ''strcpy()''.
 
<langsyntaxhighlight lang="c">#include<string.h>
#include<stdio.h>
#include<stdlib.h>
Line 1,036 ⟶ 1,263:
 
return 0;
}</langsyntaxhighlight>
 
Another way to accomplish the task without the built-in string functions is to temporarily modify the separator character. This method does not need any additional memory, but requires the input string to be writeable.
<langsyntaxhighlight lang="c">#include<stdio.h>
 
typedef void (*callbackfunc)(const char *);
Line 1,064 ⟶ 1,291:
tokenize(array, ',', doprint);
return 0;
}</langsyntaxhighlight>
 
=={{header|C sharp|C#}}==
<langsyntaxhighlight lang="csharp">string str = "Hello,How,Are,You,Today";
// or Regex.Split ( "Hello,How,Are,You,Today", "," );
// (Regex is in System.Text.RegularExpressions namespace)
string[] strings = str.Split(',');
Console.WriteLine(String.Join(".", strings));
</syntaxhighlight>
</lang>
 
=={{header|C++}}==
Line 1,079 ⟶ 1,306:
std::getline() is typically used to tokenize strings on a single-character delimiter
 
<langsyntaxhighlight lang="cpp">#include <string>
#include <sstream>
#include <vector>
Line 1,094 ⟶ 1,321:
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."));
std::cout << '\n';
}</langsyntaxhighlight>
 
{{works with|C++98}}
C++ allows the user to redefine what is considered whitespace. If the delimiter is whitespace, tokenization becomes effortless.
 
<langsyntaxhighlight lang="cpp">#include <string>
#include <locale>
#include <sstream>
Line 1,123 ⟶ 1,350:
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."));
std::cout << '\n';
}</langsyntaxhighlight>
 
{{works with|C++98}}
Line 1,129 ⟶ 1,356:
The boost library has multiple options for easy tokenization.
 
<langsyntaxhighlight lang="cpp">#include <string>
#include <vector>
#include <iterator>
Line 1,142 ⟶ 1,369:
copy(v.begin(), v.end(), std::ostream_iterator<std::string>(std::cout, "."))
std::cout << '\n';
}</langsyntaxhighlight>
 
{{works with|C++23}}
C++20 and C++23 drastically improve the ergonomics of simple manipulation of ranges.
 
<syntaxhighlight lang="cpp">#include <string>
#include <ranges>
#include <iostream>

// Split "Hello,How,Are,You,Today" on ',' and re-join the words with '.'.
int main() {
    const std::string input = "Hello,How,Are,You,Today";
    auto words  = input | std::views::split(',');       // lazy range of the comma-separated words
    auto dotted = words | std::views::join_with('.');   // the same words with '.' interleaved
    std::cout << std::ranges::to<std::string>(dotted);  // materialise into a string and print it
}</syntaxhighlight>
 
=={{header|Ceylon}}==
{{works with|Ceylon 1.2}}
<langsyntaxhighlight lang="ceylon">shared void tokenizeAString() {
value input = "Hello,How,Are,You,Today";
value tokens = input.split(','.equals);
print(".".join(tokens));
}</langsyntaxhighlight>
 
=={{header|CFEngine}}==
<syntaxhighlight lang="cfengine">bundle agent main
{
reports:
"${with}" with => join(".", splitstring("Hello,How,Are,You,Today", ",", 99));
}
</syntaxhighlight>
{{out}}
<pre>cf-agent -KIf ./tokenize-a-string.cf
R: Hello.How.Are.You.Today</pre>
 
See https://docs.cfengine.com/docs/master/reference-functions.html for a complete list of available functions.
 
=={{header|Clojure}}==
Using native Clojure functions and Java Interop:
<langsyntaxhighlight lang="clojure">(apply str (interpose "." (.split #"," "Hello,How,Are,You,Today")))</langsyntaxhighlight>
 
Using the clojure.string library:
<langsyntaxhighlight lang="clojure">(clojure.string/join "." (clojure.string/split "Hello,How,Are,You,Today" #","))</langsyntaxhighlight>
 
=={{header|CLU}}==
<syntaxhighlight lang="clu">% This iterator splits the string on a given character,
% and returns each substring in order.
% s: the string to tokenize; c: the delimiter character.
tokenize = iter (s: string, c: char) yields (string)
while ~string$empty(s) do
% Position of the next delimiter; 0 is treated as "not found".
next: int := string$indexc(c, s)
if next = 0 then
% No delimiter left: the remainder is the final token.
yield(s)
break
else
% Yield the text before the delimiter, then continue with the
% part after it (next+1 skips over the delimiter itself).
yield(string$substr(s, 1, next-1))
s := string$rest(s, next+1)
end
end
end tokenize

% Entry point: print each comma-separated token of the sample string
% on its own line, suffixed with a period.
start_up = proc ()
po: stream := stream$primary_output()
str: string := "Hello,How,Are,You,Today"
for part: string in tokenize(str, ',') do
stream$putl(po, part || ".")
end
end start_up</syntaxhighlight>
{{out}}
<pre>Hello.
How.
Are.
You.
Today.</pre>
 
=={{header|COBOL}}==
This can be made to handle more complex cases; UNSTRING allows multiple delimiters, capture of which delimiter was used for each field, a POINTER for starting position (set on ending), along with match TALLYING.
 
<syntaxhighlight lang="cobol">
identification division.
program-id. tokenize.
Line 1,193 ⟶ 1,479:
goback.
end program tokenize.
</syntaxhighlight>
 
{{out}}
Line 1,203 ⟶ 1,489:
=={{header|CoffeeScript}}==
 
<langsyntaxhighlight lang="coffeescript">
arr = "Hello,How,Are,You,Today".split ","
console.log arr.join "."
</syntaxhighlight>
</lang>
 
=={{header|ColdFusion}}==
=== Classic tag based CFML ===
<langsyntaxhighlight lang="cfm">
<cfoutput>
<cfset wordListTag = "Hello,How,Are,You,Today">
#Replace( wordListTag, ",", ".", "all" )#
</cfoutput>
</syntaxhighlight>
</lang>
{{Output}}
<pre>
Line 1,222 ⟶ 1,508:
 
=== Script Based CFML ===
<langsyntaxhighlight lang="cfm"><cfscript>
wordList = "Hello,How,Are,You,Today";
splitList = replace( wordList, ",", ".", "all" );
writeOutput( splitList );
</cfscript></langsyntaxhighlight>
{{Output}}
<pre>
Line 1,236 ⟶ 1,522:
There are libraries out there that handle splitting (e.g., [http://www.cliki.net/SPLIT-SEQUENCE SPLIT-SEQUENCE], and the more-general [http://weitz.de/cl-ppcre/ CL-PPCRE]), but this is a simple one-off, too. When the words are written with write-with-periods, there is no final period after the last word.
 
<langsyntaxhighlight lang="lisp">(defun comma-split (string)
(loop for start = 0 then (1+ finish)
for finish = (position #\, string :start start)
Line 1,243 ⟶ 1,529:
 
(defun write-with-periods (strings)
(format t "~{~A~^.~}" strings))</langsyntaxhighlight>
 
=={{header|Cowgol}}==
<langsyntaxhighlight lang="cowgol">include "cowgol.coh";
include "strings.coh";
 
Line 1,285 ⟶ 1,571:
print(".\n");
i := i + 1;
end loop;</langsyntaxhighlight>
{{out}}
<pre>Hello.
Line 1,292 ⟶ 1,578:
You.
Today.</pre>
 
=={{header|Crystal}}==
<syntaxhighlight lang="crystal">puts "Hello,How,Are,You,Today".split(',').join('.')</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|D}}==
<langsyntaxhighlight lang="d">void main() {
import std.stdio, std.string;
 
"Hello,How,Are,You,Today".split(',').join('.').writeln;
}</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
Line 1,305 ⟶ 1,596:
=== Using String.split ===
{{libheader| System.SysUtils}}
<syntaxhighlight lang="delphi">
<lang Delphi>
program Tokenize_a_string;
 
Line 1,323 ⟶ 1,614:
end.
 
</syntaxhighlight>
</lang>
 
=== Using TStringList ===
<syntaxhighlight lang="delphi">
<lang Delphi>
program TokenizeString;
 
Line 1,359 ⟶ 1,650:
 
end.
</syntaxhighlight>
</lang>
 
The result is:
 
<syntaxhighlight lang="delphi">
<lang Delphi>
Hello
How
Line 1,369 ⟶ 1,660:
You
Today
</syntaxhighlight>
</lang>
 
=={{header|dt}}==
<syntaxhighlight lang="dt">"Hello,How,Are,You,Today" "," split "." join pl</syntaxhighlight>
 
=={{header|Dyalect}}==
<syntaxhighlight lang="dyalect">var str = "Hello,How,Are,You,Today"

var strings = str.Split(',')
print(values: strings, separator: ".")</syntaxhighlight>
var strings = str.split(',')
print(values: strings, separator: ".")</lang>
 
{{out}}
 
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Déjà Vu}}==
<langsyntaxhighlight lang="dejavu">!print join "." split "Hello,How,Are,You,Today" ","</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|E}}==
<langsyntaxhighlight lang="e">".".rjoin("Hello,How,Are,You,Today".split(","))</langsyntaxhighlight>
 
=={{header|EasyLang}}==
<syntaxhighlight lang="easylang">
s$ = "Hello,How,Are,You,Today"
a$[] = strsplit s$ ","
for s$ in a$[]
write s$ & "."
.
</syntaxhighlight>
 
=={{header|Elena}}==
ELENA 46.x:
<langsyntaxhighlight lang="elena">import system'routines;
import extensions;
public program()
{
varauto string := "Hello,How,Are,You,Today";
string.splitBy:(",").forEach::(s)
{
console.print(s,".")
}
}</langsyntaxhighlight>
 
=={{header|Elixir}}==
<langsyntaxhighlight lang="elixir">
tokens = String.split("Hello,How,Are,You,Today", ",")
IO.puts Enum.join(tokens, ".")
</syntaxhighlight>
</lang>
 
=={{header|EMal}}==
<syntaxhighlight lang="emal">
text value = "Hello,How,Are,You,Today"
List tokens = value.split(",")
writeLine(tokens.join("."))
# single line version
writeLine("Hello,How,Are,You,Today".split(",").join("."))
</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
Hello.How.Are.You.Today
</pre>
 
=={{header|Erlang}}==
<langsyntaxhighlight lang="erlang">-module(tok).
-export([start/0]).
 
Line 1,417 ⟶ 1,731:
Lst = string:tokens("Hello,How,Are,You,Today",","),
io:fwrite("~s~n", [string:join(Lst,".")]),
ok.</langsyntaxhighlight>
 
=={{header|Euphoria}}==
<langsyntaxhighlight lang="euphoria">function split(sequence s, integer c)
sequence out
integer first, delim
Line 1,441 ⟶ 1,755:
for i = 1 to length(s) do
puts(1, s[i] & ',')
end for</langsyntaxhighlight>
 
=={{header|F_Sharp|F#}}==
<langsyntaxhighlight lang="fsharp">System.String.Join(".", "Hello,How,Are,You,Today".Split(','))</langsyntaxhighlight>
 
=={{header|Factor}}==
<langsyntaxhighlight lang="factor">"Hello,How,Are,You,Today" "," split "." join print</langsyntaxhighlight>
 
=={{header|Falcon}}==
'''VBA/Python programmer's approach to this solution, not sure if it's the most falconic way'''
<langsyntaxhighlight lang="falcon">
/* created by Aykayayciti Earl Lamont Montgomery
April 9th, 2018 */
Line 1,465 ⟶ 1,779:
 
> b
</syntaxhighlight>
</lang>
{{out}}
<pre>
Line 1,476 ⟶ 1,790:
A string can be split on a given character, returning a list of the intervening strings.
 
<langsyntaxhighlight lang="fantom">
class Main
{
Line 1,489 ⟶ 1,803:
}
}
</syntaxhighlight>
</lang>
 
=={{header|Fennel}}==
{{trans|Lua}}
<syntaxhighlight lang="fennel">;; Add a split method to the string metatable: returns the list of
;; substrings of `self` separated by `sep` (note: `sep` is interpolated
;; into a Lua pattern character class, so it should be a plain character).
(fn string.split [self sep]
(let [pattern (string.format "([^%s]+)" sep)
fields {}]
;; gsub is used only for its side effect: the callback appends
;; every match to `fields`.
(self:gsub pattern (fn [c] (tset fields (+ 1 (length fields)) c)))
fields))

;; Split the sample string on "," and re-join with "." for printing.
(let [str "Hello,How,Are,You,Today"]
(print (table.concat (str:split ",") ".")))</syntaxhighlight>
 
=={{header|Forth}}==
There is no standard string split routine, but it is easily written. The results are saved temporarily to the dictionary.
 
<langsyntaxhighlight lang="forth">: split ( str len separator len -- tokens count )
here >r 2swap
begin
Line 1,511 ⟶ 1,836:
1 ?do dup 2@ type ." ." cell+ cell+ loop 2@ type ;
 
s" Hello,How,Are,You,Today" s" ," split .tokens \ Hello.How.Are.You.Today</langsyntaxhighlight>
 
=={{header|Fortran}}==
{{works with|Fortran|90 and later}}
<langsyntaxhighlight lang="fortran">PROGRAM Example
 
CHARACTER(23) :: str = "Hello,How,Are,You,Today"
Line 1,537 ⟶ 1,862:
END DO
END PROGRAM Example</langsyntaxhighlight>
 
=={{header|Frink}}==
<langsyntaxhighlight lang="frink">
println[join[".", split[",", "Hello,How,Are,You,Today"]]]
</syntaxhighlight>
</lang>
 
=={{header|FutureBasic}}==
<syntaxhighlight lang="futurebasic">
window 1, @"Tokenize a string"
 
void local fn DoIt
CFStringRef string = @"Hello,How,Are,You,Today"
CFArrayRef tokens = fn StringComponentsSeparatedByString( string, @"," )
print fn ArrayComponentsJoinedByString( tokens, @"." )
end fn
 
fn DoIt
 
HandleEvents
</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|Gambas}}==
'''[https://gambas-playground.proko.eu/?gist=218e240236cdf1419a405abfed906ed3 Click this link to run this code]'''
<langsyntaxhighlight lang="gambas">Public Sub Main()
Dim sString As String[] = Split("Hello,How,Are,You,Today")
 
Print sString.Join(".")
 
End</langsyntaxhighlight>
Output:
<pre>
Line 1,558 ⟶ 1,902:
 
=={{header|GAP}}==
<langsyntaxhighlight lang="gap">SplitString("Hello,How,Are,You,Today", ",");
# [ "Hello", "How", "Are", "You", "Today" ]
 
JoinStringsWithSeparator(last, ".");
# "Hello.How.Are.You.Today"</langsyntaxhighlight>
 
=={{header|Genie}}==
<langsyntaxhighlight lang="genie">[indent=4]
 
init
Line 1,571 ⟶ 1,915:
words:array of string[] = str.split(",")
joined:string = string.joinv(".", words)
print joined</langsyntaxhighlight>
 
{{out}}
Line 1,579 ⟶ 1,923:
 
=={{header|Go}}==
<langsyntaxhighlight lang="go">package main
 
import (
Line 1,589 ⟶ 1,933:
s := "Hello,How,Are,You,Today"
fmt.Println(strings.Join(strings.Split(s, ","), "."))
}</langsyntaxhighlight>
 
=={{header|Groovy}}==
<langsyntaxhighlight lang="groovy">println 'Hello,How,Are,You,Today'.split(',').join('.')</langsyntaxhighlight>
 
=={{header|Haskell}}==
'''Using Data.Text'''
 
<langsyntaxhighlight lang="haskell">{-# OPTIONS_GHC -XOverloadedStrings #-}
import Data.Text (splitOn,intercalate)
import qualified Data.Text.IO as T (putStrLn)
 
main = T.putStrLn . intercalate "." $ splitOn "," "Hello,How,Are,You,Today"</langsyntaxhighlight>
 
Output: Hello.How.Are.You.Today
Line 1,609 ⟶ 1,953:
The necessary operations are unfortunately not in the standard library (yet), but simple to write:
 
<langsyntaxhighlight lang="haskell">splitBy :: (a -> Bool) -> [a] -> [[a]]
splitBy _ [] = []
splitBy f list = first : splitBy f (dropWhile f rest) where
Line 1,624 ⟶ 1,968:
-- using regular expression to split:
import Text.Regex
putStrLn $ joinWith "." $ splitRegex (mkRegex ",") $ "Hello,How,Are,You,Today"</langsyntaxhighlight>
 
Tokenizing can also be realized by using unfoldr and break:
<langsyntaxhighlight Haskelllang="haskell">*Main> mapM_ putStrLn $ takeWhile (not.null) $ unfoldr (Just . second(drop 1). break (==',')) "Hello,How,Are,You,Today"
Hello
How
Are
You
Today</langsyntaxhighlight>
* You need to import the modules Data.List and Control.Arrow
 
Line 1,638 ⟶ 1,982:
 
=={{header|HicEst}}==
<langsyntaxhighlight lang="hicest">CHARACTER string="Hello,How,Are,You,Today", list
 
nWords = INDEX(string, ',', 256) + 1
Line 1,650 ⟶ 1,994:
DO i = 1, nWords
WRITE(APPend) TRIM(CHAR(i, maxWordLength, list)), '.'
ENDDO</langsyntaxhighlight>
 
=={{header|Icon}} and {{header|Unicon}}==
<langsyntaxhighlight lang="icon">procedure main()
A := []
"Hello,How,Are,You,Today" ? {
Line 1,661 ⟶ 2,005:
every writes(!A,".")
write()
end</langsyntaxhighlight>
 
{{out}}
Line 1,671 ⟶ 2,015:
 
A Unicon-specific solution is:
<langsyntaxhighlight lang="unicon">import util
 
procedure main()
Line 1,677 ⟶ 2,021:
every writes(!A,".")
write()
end</langsyntaxhighlight>
 
One wonders what the expected output should be with the input string ",,,,".
 
=={{header|Io}}==
<langsyntaxhighlight lang="io">"Hello,How,Are,You,Today" split(",") join(".") println</langsyntaxhighlight>
 
=={{header|J}}==
<langsyntaxhighlight lang="j"> s=: 'Hello,How,Are,You,Today'
] t=: <;._1 ',',s
+-----+---+---+---+-----+
Line 1,694 ⟶ 2,038:
 
'.' (I.','=s)}s NB. two steps combined
Hello.How.Are.You.Today</langsyntaxhighlight>
 
Alternatively using the system library/script <tt>strings</tt>
<langsyntaxhighlight lang="j"> require 'strings'
',' splitstring s
+-----+---+---+---+-----+
Line 1,704 ⟶ 2,048:
 
'.' joinstring ',' splitstring s
Hello.How.Are.You.Today</langsyntaxhighlight>
 
<tt>splitstring</tt> and <tt>joinstring</tt> also work with longer "delimiters":
<langsyntaxhighlight lang="j"> '"'([ ,~ ,) '","' joinstring ',' splitstring s
"Hello","How","Are","You","Today"</langsyntaxhighlight>
 
But, of course, this could be solved with simple string replacement:
 
<langsyntaxhighlight Jlang="j"> rplc&',.' s
Hello.How.Are.You.Today</langsyntaxhighlight>
 
The task asks us to ''Separate the string "Hello,How,Are,You,Today" by commas into an array (or list) so that each element of it stores a different word.'' but for many purposes the original string is an adequate data structure. Note also that given a string, a list of "word start" indices and "word length" integers can be logically equivalent to having an "array of words" -- and, depending on implementation details may be a superior or inferior choice to some other representation. But, in current definition of this task, the concept of "word length" plays no useful role.
Line 1,719 ⟶ 2,063:
Note also that J provides several built-in concepts of parsing: split on leading delimiter, split on trailing delimiter, split J language words. Also, it's sometimes more efficient to append to a string than to prepend to it. So a common practice for parsing on an embedded delimiter is to append a copy of the delimiter to the string and then use the appended result:
 
<langsyntaxhighlight Jlang="j"> fn;._2 string,','</langsyntaxhighlight>
 
Here '''fn''' is applied to each ',' delimited substring and the results are assembled into an array.
 
Or, factoring out the names:
<langsyntaxhighlight Jlang="j"> fn ((;._2)(@(,&','))) string</langsyntaxhighlight>
 
=={{header|Java}}==
Line 1,735 ⟶ 2,079:
{{works with|Java|1.8+}}
 
<langsyntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
System.out.println(String.join(".", toTokenize.split(",")));</langsyntaxhighlight>
 
{{works with|Java|1.4+}}
<langsyntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
 
String words[] = toTokenize.split(",");//splits on one comma, multiple commas yield multiple splits
Line 1,745 ⟶ 2,089:
for(int i=0; i<words.length; i++) {
System.out.print(words[i] + ".");
}</langsyntaxhighlight>
 
The other way is to use StringTokenizer. It will skip any empty tokens. So if two commas are given in line, there will be an empty string in the array given by the split function, but no empty string with the StringTokenizer object. This method takes more code to use, but allows you to get tokens incrementally instead of all at once.
 
{{works with|Java|1.0+}}
<langsyntaxhighlight lang="java5">String toTokenize = "Hello,How,Are,You,Today";
 
StringTokenizer tokenizer = new StringTokenizer(toTokenize, ",");
while(tokenizer.hasMoreTokens()) {
System.out.print(tokenizer.nextToken() + ".");
}</langsyntaxhighlight>
 
=={{header|JavaScript}}==
{{works with|Firefox|2.0}}
<syntaxhighlight lang="javascript">console.log(
  "Hello,How,Are,You,Today"
    .split(",")
    .join(".")
);</syntaxhighlight>A more advanced program to tokenise strings:<syntaxhighlight lang="javascript" line="1">
const Tokeniser = (function () {
    // Matches an optionally-signed decimal number with an optional exponent.
    // Anchored (^…$) so isNumber() tests the WHOLE string, and deliberately
    // WITHOUT the /g flag: a global regex used with .test() keeps state in
    // lastIndex and alternates results between calls. The previous version
    // also had a typo: \d+\.d+ (literal "d") instead of \d+\.\d+.
    const numberRegex = /^-?(\d+\.\d+|\d+\.|\.\d+|\d+)([eE][+-]?\d+)?$/;
    return {
        // Tunable tokenisation rules; callers may adjust these before parsing.
        settings: {
            operators: ["<", ">", "=", "+", "-", "*", "/", "?", "!"],
            separators: [",", ".", ";", ":", " ", "\t", "\n"],
            // Opening/closing characters stored as adjacent pairs.
            groupers: ["(", ")", "[", "]", "{", "}", '"', '"', "'", "'"],
            keepWhiteSpacesAsTokens: false,
            trimTokens: true
        },
        // True for actual numbers, or for strings whose ENTIRE text is a
        // valid (optionally signed, optionally exponent-bearing) number.
        isNumber: function (value) {
            if (typeof value === "number") {
                return true;
            } else if (typeof value === "string") {
                return numberRegex.test(value);
            }
            return false;
        },
        // Returns the character stored immediately after `grouper` in
        // settings.groupers (i.e. the closing partner of an opener), or
        // null when `grouper` is not a known grouping character.
        closeGrouper: function (grouper) {
            if (this.settings.groupers.includes(grouper)) {
                return this.settings.groupers[this.settings.groupers.indexOf(grouper) + 1];
            }
            return null;
        },
        // Classifies one character as "operator", "separator", "grouper"
        // or "other" according to the current settings.
        tokenType: function (char) {
            if (this.settings.operators.includes(char)) {
                return "operator";
            } else if (this.settings.separators.includes(char)) {
                return "separator";
            } else if (this.settings.groupers.includes(char)) {
                return "grouper";
            }
            return "other";
        },
        // Splits `str` into an array of tokens. Non-string input is
        // stringified first (null -> "null", plain objects via JSON).
        parseString: function (str) {
            if (typeof str !== "string") {
                if (str === null) {
                    return "null";
                } if (typeof str === "object") {
                    str = JSON.stringify(str);
                } else {
                    str = str.toString();
                }
            }
            const tokens = [];
            let pending = "";
            // Appends `token` honouring the trim/keep-whitespace settings;
            // pure-whitespace tokens are dropped unless configured otherwise.
            const emit = (token) => {
                if (token.trim() !== "") {
                    tokens.push(this.settings.trimTokens ? token.trim() : token);
                } else if (this.settings.keepWhiteSpacesAsTokens) {
                    tokens.push(token);
                }
            };
            for (let i = 0; i < str.length; i++) {
                const ch = str[i];
                // A token ends when the character class changes; separators
                // are always emitted as single-character tokens.
                if (this.tokenType(pending) !== this.tokenType(ch) || this.tokenType(ch) === "separator") {
                    emit(pending);
                    pending = ch;
                    if (this.tokenType(pending) === "separator") {
                        emit(pending);
                        pending = "";
                    }
                } else {
                    pending += ch;
                }
            }
            emit(pending); // flush the final token
            return tokens.filter((token) => token !== "");
        }
    };
})();
</syntaxhighlight>Output:<syntaxhighlight lang="javascript">
Tokeniser.parseString("Hello,How,Are,You,Today");
 
// ['Hello', ',', 'How', ',', 'Are', ',', 'You', ',', 'Today']
</syntaxhighlight>
 
=={{header|jq}}==
<syntaxhighlight lang="jq">split(",") | join(".")</syntaxhighlight>Example:<syntaxhighlight lang="sh">$ jq -r 'split(",") | join(".")'
"Hello,How,Are,You,Today"
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Jsish}}==
Being in the ECMAScript family, Jsi is blessed with many easy to use character, string and array manipulation routines.
 
<langsyntaxhighlight lang="javascript">puts('Hello,How,Are,You,Today'.split(',').join('.'))</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Julia}}==
<syntaxhighlight lang="julia">
<lang Julia>
s = "Hello,How,Are,You,Today"
a = split(s, ",")
Line 1,783 ⟶ 2,208:
println("Splits into ", a)
println("Reconstitutes to \"", t, "\"")
</syntaxhighlight>
</lang>
 
{{out}}
Line 1,793 ⟶ 2,218:
 
=={{header|K}}==
<langsyntaxhighlight Klang="k">words: "," \: "Hello,How,Are,You,Today"
"." /: words</langsyntaxhighlight>
 
{{out}}
Line 1,800 ⟶ 2,225:
"Hello.How.Are.You.Today"
</pre>
 
{{works with|ngn/k}}<syntaxhighlight lang=K>","\"Hello,How,Are,You,Today"
("Hello"
"How"
"Are"
"You"
"Today")</syntaxhighlight>
 
=={{header|Klingphix}}==
<langsyntaxhighlight Klingphixlang="klingphix">( "Hello,How,Are,You,Today" "," ) split len [ get print "." print ] for
 
nl "End " input</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.
Line 1,811 ⟶ 2,243:
=={{header|Kotlin}}==
{{works with|Kotlin|1.0b4}}
<langsyntaxhighlight lang="scala">fun main(args: Array<String>) {
val input = "Hello,How,Are,You,Today"
println(input.split(',').joinToString("."))
}</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Ksh}}==
<syntaxhighlight lang="ksh">
#!/bin/ksh
 
# Tokenize a string
 
# # Variables:
#
string="Hello,How,Are,You,Today"
inputdelim=\, # a comma
outputdelim=\. # a period
 
# # Functions:
#
# # Function _tokenize(str, indelim, outdelim)
#
function _tokenize {
	# Replace every occurrence of one delimiter with another and echo the result.
	# $1 = input string, $2 = input delimiter, $3 = output delimiter
	typeset _str ; _str="$1"
	typeset _ind ; _ind="$2"
	typeset _outd ; _outd="$3"
	# ${var/pat/rep} replaces only the FIRST match, so loop until a
	# replacement changes nothing, i.e. no input delimiter remains.
	# NOTE(review): if the output delimiter contains the input delimiter
	# this fixed-point loop would not terminate — callers must avoid that.
	while [[ ${_str} != ${_str/${_ind}/${_outd}} ]]; do
		_str=${_str/${_ind}/${_outd}}
	done

	echo "${_str}"
}
 
######
# main #
######
 
_tokenize "${string}" "${inputdelim}" "${outputdelim}"</syntaxhighlight>
{{out}}<pre>Hello.How.Are.You.Today</pre>
 
=={{header|LabVIEW}}==
Line 1,823 ⟶ 2,290:
 
=={{header|Lambdatalk}}==
<langsyntaxhighlight lang="scheme">
{S.replace , by . in Hello,How,Are,You,Today}.
-> Hello.How.Are.You.Today.
</syntaxhighlight>
</lang>
 
=={{header|Lang}}==
<syntaxhighlight lang="lang">
$str = Hello,How,Are,You,Today
fn.println(fn.join(\., fn.split($str, \,)))
</syntaxhighlight>
 
=={{header|Lang5}}==
<syntaxhighlight lang="lang5">'Hello,How,Are,You,Today ', split '. join .</syntaxhighlight>
 
=={{header|LDPL}}==
<langsyntaxhighlight lang="ldpl">
DATA:
explode/words is text vector
Line 1,884 ⟶ 2,357:
add 1 and i in i
repeat
</syntaxhighlight>
</lang>
 
=={{header|LFE}}==
 
<langsyntaxhighlight lang="lisp">
> (set split (string:tokens "Hello,How,Are,You,Today" ","))
("Hello" "How" "Are" "You" "Today")
> (string:join split ".")
"Hello.How.Are.You.Today"
</syntaxhighlight>
</lang>
 
=={{header|Lingo}}==
<langsyntaxhighlight lang="lingo">input = "Hello,How,Are,You,Today"
_player.itemDelimiter = ","
output = ""
Line 1,904 ⟶ 2,377:
delete the last char of output
put output
-- "Hello.How.Are.You.Today"</langsyntaxhighlight>
 
=={{header|Logo}}==
{{works with|UCB Logo}}
<langsyntaxhighlight lang="logo">to split :str :sep
output parse map [ifelse ? = :sep ["| |] [?]] :str
end</langsyntaxhighlight>
 
This form is more robust, doing the right thing if there are embedded spaces.
<langsyntaxhighlight lang="logo">to split :str :by [:acc []] [:w "||]
if empty? :str [output lput :w :acc]
ifelse equal? first :str :by ~
[output (split butfirst :str :by lput :w :acc)] ~
[output (split butfirst :str :by :acc lput first :str :w)]
end</langsyntaxhighlight>
 
<langsyntaxhighlight lang="logo">? show split "Hello,How,Are,You,Today ",
[Hello How Are You Today]</langsyntaxhighlight>
 
=={{header|Logtalk}}==
Using Logtalk built-in support for Definite Clause Grammars (DCGs) and representing the strings as atoms for readbility:
<langsyntaxhighlight lang="logtalk">
:- object(spliting).
 
Line 1,948 ⟶ 2,421:
 
:- end_object.
</syntaxhighlight>
</lang>
{{out}}
<pre>
Line 1,958 ⟶ 2,431:
=={{header|Lua}}==
Split function callously stolen from the lua-users wiki
<langsyntaxhighlight Lualang="lua">function string:split (sep)
local sep, fields = sep or ":", {}
local pattern = string.format("([^%s]+)", sep)
Line 1,966 ⟶ 2,439:
 
local str = "Hello,How,Are,You,Today"
print(table.concat(str:split(","), "."))</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|M2000 Interpreter}}==
<syntaxhighlight lang="m2000 interpreter">
<lang M2000 Interpreter>
Module CheckIt {
Function Tokenize$(s){
Line 1,984 ⟶ 2,457:
}
Checkit
</syntaxhighlight>
</lang>
 
=={{header|M4}}==
<langsyntaxhighlight M4lang="m4">define(`s',`Hello,How,Are,You,Today')
define(`set',`define(`$1[$2]',`$3')')
define(`get',`defn($1[$2])')
Line 1,997 ⟶ 2,470:
define(`show',
`ifelse(eval(j<n),1,`get(a,j).`'define(`j',incr(j))`'show')')
show</langsyntaxhighlight>
 
{{out}}
Line 2,005 ⟶ 2,478:
 
=={{header|Maple}}==
<syntaxhighlight lang="maple">StringTools:-Join(StringTools:-Split("Hello,How,Are,You,Today", ","),".");</syntaxhighlight>
{{Out|Output}}
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Mathematica}}/{{header|Wolfram Language}}==
<langsyntaxhighlight Mathematicalang="mathematica">StringJoin@StringSplit["Hello,How,Are,You,Today", "," -> "."]</langsyntaxhighlight>
 
=={{header|MATLAB}} / {{header|Octave}}==
<syntaxhighlight lang="matlab">
<lang Matlab>
s=strsplit('Hello,How,Are,You,Today',',')
fprintf(1,'%s.',s{:})
</syntaxhighlight>
</lang>
 
{{out}}
Line 2,024 ⟶ 2,497:
 
=={{header|Maxima}}==
<langsyntaxhighlight Maximalang="maxima">l: split("Hello,How,Are,You,Today", ",")$
printf(true, "~{~a~^.~}~%", l)$</langsyntaxhighlight>
 
A slightly different way
<syntaxhighlight lang="maxima">
split("Hello,How,Are,You,Today",",")$
simplode(%,".");
</syntaxhighlight>
{{out}}
<pre>
"Hello.How.Are.You.Today"
</pre>
 
=={{header|MAXScript}}==
<langsyntaxhighlight lang="maxscript">output = ""
for word in (filterString "Hello,How,Are,You,Today" ",") do
(
output += (word + ".")
)
format "%\n" output</langsyntaxhighlight>
 
=={{header|Mercury}}==
<syntaxhighlight lang="text">
:- module string_tokenize.
:- interface.
Line 2,049 ⟶ 2,532:
Tokens = string.split_at_char((','), "Hello,How,Are,You,Today"),
io.write_list(Tokens, ".", io.write_string, !IO),
io.nl(!IO).</langsyntaxhighlight>
 
=={{header|min}}==
{{works with|min|0.19.3}}
<langsyntaxhighlight lang="min">"Hello,How,Are,You,Today" "," split "." join print</langsyntaxhighlight>
 
=={{header|MiniScript}}==
<langsyntaxhighlight MiniScriptlang="miniscript">tokens = "Hello,How,Are,You,Today".split(",")
print tokens.join(".")</langsyntaxhighlight>
 
=={{header|MMIX}}==
<langsyntaxhighlight lang="mmix">sep IS ','
EOS IS 0
NL IS 10
Line 2,113 ⟶ 2,596:
LDBU t,tp
PBNZ t,2B % UNTIL EOB(uffer)
TRAP 0,Halt,0</langsyntaxhighlight>
{{out}}
<pre>
Line 2,125 ⟶ 2,608:
 
=={{header|Modula-3}}==
<langsyntaxhighlight lang="modula3">MODULE Tokenize EXPORTS Main;
 
IMPORT IO, TextConv;
Line 2,142 ⟶ 2,625:
END;
IO.Put("\n");
END Tokenize.</langsyntaxhighlight>
 
=={{header|MUMPS}}==
<langsyntaxhighlight MUMPSlang="mumps">TOKENS
NEW I,J,INP
SET INP="Hello,how,are,you,today"
Line 2,151 ⟶ 2,634:
NEW J FOR J=1:1:I WRITE INP(J) WRITE:J'=I "."
KILL I,J,INP // Kill is optional. "New" variables automatically are killed on "Quit"
QUIT</langsyntaxhighlight>
 
In use:
Line 2,158 ⟶ 2,641:
 
=={{header|Nanoquery}}==
<langsyntaxhighlight lang="nanoquery">for word in "Hello,How,Are,You,Today".split(",")
print word + "."
end</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.</pre>
 
=={{header|Nemerle}}==
<langsyntaxhighlight Nemerlelang="nemerle">using System;
using System.Console;
using Nemerle.Utility.NString;
Line 2,178 ⟶ 2,661:
// a quick in place list comprehension takes care of that
}
}</langsyntaxhighlight>
 
=={{header|NetRexx}}==
<langsyntaxhighlight NetRexxlang="netrexx">/*NetRexx program *****************************************************
* 20.08.2012 Walter Pachl derived from REXX Version 3
**********************************************************************/
Line 2,192 ⟶ 2,675:
Say ss.word(i)'.'
End
Say 'End-of-list.'</langsyntaxhighlight>
Output as in REXX version
 
=={{header|NewLISP}}==
<langsyntaxhighlight NewLISPlang="newlisp">(print (join (parse "Hello,How,Are,You,Today" ",") "."))</langsyntaxhighlight>
 
=={{header|Nial}}==
Line 2,204 ⟶ 2,687:
Define Array with input string:
 
<langsyntaxhighlight Niallang="nial"> s := 'Hello,How,Are,You,Today'
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|H|e|l|l|o|,|H|o|w|,|A|r|e|,|Y|o|u|,|T|o|d|a|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+</langsyntaxhighlight>
 
Split string at the commas:
 
<langsyntaxhighlight Niallang="nial"> t := s eachall = `, cut s
+-----------+-------+-------+-------+-----------+
|+-+-+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+-+-+|
||H|e|l|l|o|||H|o|w|||A|r|e|||Y|o|u|||T|o|d|a|y||
|+-+-+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+|+-+-+-+-+-+|
+-----------+-------+-------+-------+-----------+</langsyntaxhighlight>
 
Join string with <code>.</code> and remove last <code>.</code>
 
<langsyntaxhighlight Niallang="nial"> u := front content (cart t `.)
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|H|e|l|l|o|.|H|o|w|.|A|r|e|.|Y|o|u|.|T|o|d|a|y|
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+</langsyntaxhighlight>
 
Less cluttered display, using <code>set "sketch;set "nodecor</code> display switches.
 
<langsyntaxhighlight Niallang="nial"> s:='Hello,How,Are,You,Today'
Hello,How,Are,You,Today
t:= s eachall = `, cut s
Line 2,234 ⟶ 2,717:
+-----+---+---+---+-----+
u:=front content (cart t `.)
 
Hello.How.Are.You.Today</lang>
Hello.How.Are.You.Today</syntaxhighlight>
 
Or as a one-liner:
 
<syntaxhighlight lang="nial">
front content (cart (s eachall = `, cut s) `.)
</syntaxhighlight>
 
=={{header|Nim}}==
<langsyntaxhighlight lang="nim">import strutils
 
let text = "Hello,How,Are,You,Today"
let tokens = text.split(',')
echo tokens.join(".")</langsyntaxhighlight>
 
{{out}}
Line 2,247 ⟶ 2,737:
 
=={{header|Objeck}}==
<langsyntaxhighlight lang="objeck">
class Parse {
function : Main(args : String[]) ~ Nil {
Line 2,255 ⟶ 2,745:
};
}
}</langsyntaxhighlight>
 
=={{header|Objective-C}}==
Line 2,262 ⟶ 2,752:
{{works with|Cocoa}}
 
<langsyntaxhighlight lang="objc">NSString *text = @"Hello,How,Are,You,Today";
NSArray *tokens = [text componentsSeparatedByString:@","];
NSString *result = [tokens componentsJoinedByString:@"."];
NSLog(result);</langsyntaxhighlight>
 
=={{header|OCaml}}==
To split on a single-character separator:
<langsyntaxhighlight lang="ocaml">let words = String.split_on_char ',' "Hello,How,Are,You,Today" in
String.concat "." words
</syntaxhighlight>
</lang>
 
The function split_on_char has been introduced in OCaml 4.04. In previous versions, it could be implemented by:
 
<langsyntaxhighlight lang="ocaml">let split_on_char sep s =
let r = ref [] in
let j = ref (String.length s) in
Line 2,284 ⟶ 2,774:
end
done;
String.sub s 0 !j :: !r</langsyntaxhighlight>
 
=={{header|Oforth}}==
 
<langsyntaxhighlight Oforthlang="oforth">"Hello,How,Are,You,Today" wordsWith(',') println</langsyntaxhighlight>
 
{{out}}
Line 2,296 ⟶ 2,786:
 
=={{header|ooRexx}}==
<langsyntaxhighlight ooRexxlang="oorexx">text='Hello,How,Are,You,Today'
do while text \= ''
parse var text word1 ',' text
call charout 'STDOUT:',word1'.'
end</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today.</pre>
 
=={{header|OpenEdge/Progress}}==
<langsyntaxhighlight lang="progress">FUNCTION tokenizeString RETURNS CHAR (
i_c AS CHAR
):
Line 2,328 ⟶ 2,818:
MESSAGE
tokenizeString( "Hello,How,Are,You,Today" )
VIEW-AS ALERT-BOX.</langsyntaxhighlight>
{{out}}
<pre>
Line 2,341 ⟶ 2,831:
 
=={{header|Oz}}==
<langsyntaxhighlight lang="oz">for T in {String.tokens "Hello,How,Are,You,Today" &,} do
{System.printInfo T#"."}
end</langsyntaxhighlight>
 
=={{header|PARI/GP}}==
Line 2,353 ⟶ 2,843:
{{Works with|PARI/GP|2.7.4 and above}}
 
<langsyntaxhighlight lang="parigp">
\\ Tokenize a string str according to 1 character delimiter d. Return a list of tokens.
\\ Using ssubstr() from http://rosettacode.org/wiki/Substring#PARI.2FGP
Line 2,371 ⟶ 2,861:
print("3.",tokenize(",Hello,,How,Are,You,Today",","));
}
</langsyntaxhighlight>
 
{{Output}}
Line 2,389 ⟶ 2,879:
{{Works with|PARI/GP|2.7.4 and above}}
 
<langsyntaxhighlight lang="parigp">
\\ Tokenize a string str according to 1 character delimiter d. Return a list of tokens.
\\ Using ssubstr() from http://rosettacode.org/wiki/Substring#PARI.2FGP
Line 2,418 ⟶ 2,908:
print("7. 0 pp: ", stok("",","));
}
</langsyntaxhighlight>
 
{{Output}}
Line 2,434 ⟶ 2,924:
=={{header|Pascal}}==
{{works with|Free_Pascal}}
<langsyntaxhighlight lang="pascal">program TokenizeString;
 
{$mode objfpc}{$H+}
Line 2,458 ⟶ 2,948:
Tokens.Free;
end;
end.</langsyntaxhighlight>
 
The result is:
Line 2,465 ⟶ 2,955:
 
=={{header|Perl}}==
<langsyntaxhighlight lang="perl">print join('.', split /,/, 'Hello,How,Are,You,Today'), "\n";</langsyntaxhighlight>
CLI one-liner form:
<langsyntaxhighlight lang="perl">echo "Hello,How,Are,You,Today" | perl -aplF/,/ -e '$" = "."; $_ = "@F";'</langsyntaxhighlight>
which is a compact way of telling Perl to do
<langsyntaxhighlight lang="perl">BEGIN { $/ = "\n"; $\ = "\n"; }
LINE: while (defined($_ = <ARGV>)) {
chomp $_;
Line 2,478 ⟶ 2,968:
continue {
die "-p destination: $!\n" unless print $_;
}</langsyntaxhighlight>
 
=={{header|Phix}}==
<!--<langsyntaxhighlight Phixlang="phix">(phixonline)-->
<span style="color: #0000FF;">?</span><span style="color: #7060A8;">join</span><span style="color: #0000FF;">(</span><span style="color: #7060A8;">split</span><span style="color: #0000FF;">(</span><span style="color: #008000;">"Hello,How,Are,You,Today"</span><span style="color: #0000FF;">,</span><span style="color: #008000;">","</span><span style="color: #0000FF;">),</span><span style="color: #008000;">"."</span><span style="color: #0000FF;">)</span>
<!--</langsyntaxhighlight>-->
{{Out}}
<pre>
Line 2,490 ⟶ 2,980:
 
=={{header|Phixmonti}}==
<langsyntaxhighlight Phixmontilang="phixmonti">/# "Hello,How,Are,You,Today" "," "." subst print #/
"Hello,How,Are,You,Today" "," " " subst split len for get print "." print endfor</langsyntaxhighlight>
 
=={{header|PHP}}==
{{works with|PHP|5.x}}
 
<langsyntaxhighlight lang="php"><?php
$str = 'Hello,How,Are,You,Today';
echo implode('.', explode(',', $str));
?></langsyntaxhighlight>
 
=={{header|Picat}}==
Using the built-in functions <code>split/2</code> and <code>join/2</code>.
<syntaxhighlight lang="picat">import util.
 
go =>
S = "Hello,How,Are,You,Today",
T = S.split(","),
println(T),
T.join(".").println(),
 
% As a one liner:
S.split(",").join(".").println().</syntaxhighlight>
 
{{out}}
<pre>[Hello,How,Are,You,Today]
Hello.How.Are.You.Today
Hello.How.Are.You.Today</pre>
 
=={{header|PicoLisp}}==
<langsyntaxhighlight PicoLisplang="picolisp">(mapcar pack
(split (chop "Hello,How,Are,You,Today") ",") )</langsyntaxhighlight>
 
=={{header|Pike}}==
<langsyntaxhighlight lang="pike">("Hello,How,Are,You,Today" / ",") * ".";</langsyntaxhighlight>
 
=={{header|PL/I}}==
<langsyntaxhighlight lang="pli">tok: Proc Options(main);
declare s character (100) initial ('Hello,How,Are,You,Today');
declare n fixed binary (31);
Line 2,531 ⟶ 3,039:
put skip list (string(table));
end;
end;</langsyntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|PL/M}}==
<langsyntaxhighlight lang="plm">100H:
/* CP/M CALLS */
BDOS: PROCEDURE (FN, ARG); DECLARE FN BYTE, ARG ADDRESS; GO TO 5; END BDOS;
Line 2,575 ⟶ 3,083:
 
CALL EXIT;
EOF;</langsyntaxhighlight>
{{out}}
<pre>HELLO. HOW. ARE. YOU. TODAY. </pre>
 
=={{header|Plain English}}==
<langsyntaxhighlight lang="plainenglish">To run:
Start up.
Split "Hello,How,Are,You,Today" into some string things given the comma byte.
Line 2,596 ⟶ 3,104:
If the string thing's next is not nil, append the byte to the string.
Put the string thing's next into the string thing.
Repeat.</langsyntaxhighlight>
{{out}}
<pre>
Line 2,609 ⟶ 3,117:
First show the use of sysparse_string to break up a string and make a list of strings.
 
<langsyntaxhighlight lang="pop11">;;; Make a list of strings from a string using space as separator
lvars list;
sysparse_string('the cat sat on the mat') -> list;
;;; print the list of strings
list =>
** [the cat sat on the mat]</langsyntaxhighlight>
 
By giving it an extra parameter 'true' we can make it recognize numbers and produce a list of strings and numbers
 
<langsyntaxhighlight lang="pop11">lvars list;
sysparse_string('one 1 two 2 three 3 four 4', true) -> list;
;;; print the list of strings and numbers
Line 2,627 ⟶ 3,135:
** <true>
isinteger(list(2))=>
** <true></langsyntaxhighlight>
 
Now show some uses of the built in procedure sys_parse_string, which allows more options:
 
<langsyntaxhighlight lang="pop11">;;; Make pop-11 print strings with quotes
true -> pop_pr_quotes;
;;;
Line 2,645 ⟶ 3,153:
;;; print the list of strings
strings =>
** ['Hello' 'How' 'Are' 'You' 'Today']</langsyntaxhighlight>
 
If {% ... %} were used instead of [% ... %] the result would be
a vector (i.e. array) of strings rather than a list of strings.
 
<langsyntaxhighlight lang="pop11">{% sys_parse_string(str, `,`) %} -> strings;
;;; print the vector
strings =>
** {'Hello' 'How' 'Are' 'You' 'Today'}</langsyntaxhighlight>
It is also possible to give sys_parse_string a 'conversion' procedure, which is applied to each of the tokens.
E.g. it could be used to produce a vector of numbers, using the conversion procedure 'strnumber', which converts a string to a number:
 
<langsyntaxhighlight lang="pop11">lvars numbers;
{% sys_parse_string('100 101 102 103 99.9 99.999', strnumber) %} -> numbers;
;;; the result is a vector containing integers and floats,
;;; which can be printed thus:
numbers =>
** {100 101 102 103 99.9 99.999}</langsyntaxhighlight>
 
Using lower level pop-11 facilities to tokenise the string:
 
<langsyntaxhighlight lang="pop11">;;; Declare and initialize variables
lvars str='Hello,How,Are,You,Today';
;;; Iterate over string
Line 2,684 ⟶ 3,192:
endif;
;;; Reverse the list
rev(ls) -> ls;</langsyntaxhighlight>
 
Since the task requires to use array we convert list to array
 
<langsyntaxhighlight lang="pop11">;;; Put list elements and lenght on the stack
destlist(ls);
;;; Build a vector from them
Line 2,696 ⟶ 3,204:
printf(ar(i), '%s.');
endfor;
printf('\n');</langsyntaxhighlight>
 
We could use list directly for printing:
 
<langsyntaxhighlight lang="pop11">for i in ls do
printf(i, '%s.');
endfor;</langsyntaxhighlight>
 
so the conversion to vector is purely to satisfy task formulation.
Line 2,708 ⟶ 3,216:
=={{header|PowerShell}}==
{{works with|PowerShell|1}}
<langsyntaxhighlight lang="powershell">$words = "Hello,How,Are,You,Today".Split(',')
[string]::Join('.', $words)</langsyntaxhighlight>
 
{{works with|PowerShell|2}}
<langsyntaxhighlight lang="powershell">$words = "Hello,How,Are,You,Today" -split ','
$words -join '.'</langsyntaxhighlight>
 
{{works with|PowerShell|2}}
The StringSplitOptions enumeration weeds out the return of empty elements.
<syntaxhighlight lang="powershell">
<lang PowerShell>
"Hello,How,Are,You,Today", ",,Hello,,Goodbye,," | ForEach-Object {($_.Split(',',[StringSplitOptions]::RemoveEmptyEntries)) -join "."}
</syntaxhighlight>
</lang>
{{Out}}
<pre>
Line 2,728 ⟶ 3,236:
=={{header|Prolog}}==
{{works with|SWI Prolog}}
<langsyntaxhighlight lang="prolog">splitup(Sep,[token(B)|BL]) --> splitup(Sep,B,BL).
splitup(Sep,[A|AL],B) --> [A], {\+ [A] = Sep }, splitup(Sep,AL,B).
splitup(Sep,[],[B|BL]) --> Sep, splitup(Sep,B,BL).
Line 2,736 ⟶ 3,244:
phrase(splitup(".",Tokens),Backtogether),
string_to_list(ABack,Backtogether),
writeln(ABack).</langsyntaxhighlight>
{{out}}
<pre>
Line 2,748 ⟶ 3,256:
this can be accomplished in a few lines in the top level:
 
<langsyntaxhighlight lang="prolog">
?- split_string("Hello,How,Are,You,Today", ",", "", Split),
| atomics_to_string(Split, ".", PeriodSeparated),
| writeln(PeriodSeparated).
Hello.How.Are.You.Today
</syntaxhighlight>
</lang>
 
=={{header|Python}}==
{{works with|Python|2.5}}{{works with|Python|3.0}}
 
<langsyntaxhighlight lang="python">text = "Hello,How,Are,You,Today"
tokens = text.split(',')
print ('.'.join(tokens))</langsyntaxhighlight>
 
Or if interpretation of the task description means you don't need to keep an intermediate array:
<langsyntaxhighlight lang="python">print ('.'.join('Hello,How,Are,You,Today'.split(',')))</langsyntaxhighlight>
 
=={{header|Q}}==
<langsyntaxhighlight Qlang="q">words: "," vs "Hello,How,Are,You,Today"
"." sv words</langsyntaxhighlight>
 
{{out}}
Line 2,774 ⟶ 3,282:
=={{header|QB64}}==
''CBTJD'': 2020/03/12
<langsyntaxhighlight lang="vb">a$ = "Hello,How,Are,You,Today" ' | Initialize original string.
FOR na = 1 TO LEN(a$) ' | Start loop to count number of commas.
IF MID$(a$, na, 1) = "," THEN nc = nc + 1 ' | For each comma, increment nc.
Line 2,792 ⟶ 3,300:
PRINT LEFT$(tf$, LEN(tf$) - 1) ' | Print all but the last period of tf$.
END ' | Program end.
</syntaxhighlight>
</lang>
 
'''Alternative method using word$ function:'''
----
''CBTJD'': 2020/03/12
<langsyntaxhighlight lang="vb">a$ = "Hello,How,Are,You,Today" ' | Initialize original string.
DIM t$(LEN(a$) / 2) ' | Create an overestimated sized array.
FOR nd = 1 TO LEN(a$) ' | Start loop to find each comma.
Line 2,825 ⟶ 3,333:
DONE: ' | Label for bail destination of word count error check.
END FUNCTION ' | End of function.
</syntaxhighlight>
</lang>
 
=={{header|Quackery}}==
 
<langsyntaxhighlight Quackerylang="quackery"> [ [] [] rot
witheach
[ dup char , = iff
Line 2,838 ⟶ 3,346:
[ witheach [ echo$ say "." ] ] is display ( [ --> )
$ "Hello,How,Are,You,Today" tokenise display</langsyntaxhighlight>
 
{{Out}}
Line 2,845 ⟶ 3,353:
 
=={{header|R}}==
<langsyntaxhighlight Rlang="r">text <- "Hello,How,Are,You,Today"
junk <- strsplit(text, split=",")
print(paste(unlist(junk), collapse="."))</langsyntaxhighlight>
 
or the one liner
 
<langsyntaxhighlight Rlang="r">paste(unlist(strsplit(text, split=",")), collapse=".")</langsyntaxhighlight>
 
=={{header|Racket}}==
 
<langsyntaxhighlight lang="racket">
#lang racket
(string-join (string-split "Hello,How,Are,You,Today" ",") ".")
;; -> "Hello.How.Are.You.Today"
</syntaxhighlight>
</lang>
 
=={{header|Raku}}==
(formerly Perl 6)
{{works with|Rakudo|#22 "Thousand Oaks"}}
<syntaxhighlight lang="raku" perl6line>'Hello,How,Are,You,Today'.split(',').join('.').say;</langsyntaxhighlight>
 
Or with function calls:
 
<syntaxhighlight lang="raku" perl6line>say join '.', split ',', 'Hello,How,Are,You,Today';</langsyntaxhighlight>
 
=={{header|Raven}}==
<langsyntaxhighlight lang="raven">'Hello,How,Are,You,Today' ',' split '.' join print</langsyntaxhighlight>
 
=={{header|REBOL}}==
<langsyntaxhighlight REBOLlang="rebol">print ["Original:" original: "Hello,How,Are,You,Today"]
tokens: parse original ","
dotted: "" repeat i tokens [append dotted rejoin [i "."]]
print ["Dotted: " dotted]</langsyntaxhighlight>
 
{{out}}
Line 2,886 ⟶ 3,394:
 
=={{header|Red}}==
<langsyntaxhighlight Redlang="red">str: "Hello,How,Are,You,Today"
>> tokens: split str ","
>> probe tokens
Line 2,893 ⟶ 3,401:
>> periods: replace/all form tokens " " "." ;The word FORM converts the list series to a string removing quotes.
>> print periods ;then REPLACE/ALL spaces with period
Hello.How.Are.You.Today</langsyntaxhighlight>
 
=={{header|Retro}}==
<syntaxhighlight lang="retro">{{
<lang Retro>{{
: char ( -$ ) " " ;
: tokenize ( $-$$ )
Line 2,908 ⟶ 3,416:
[ tokenize action dup 1 <> ] while drop
^buffer'get drop ;
}}</langsyntaxhighlight>
 
This will suffice to split a string into an array of substrings. It is used like this:
 
<langsyntaxhighlight Retrolang="retro">create strings 100 allot
"Hello,How,Are,You,Today" ', strings split</langsyntaxhighlight>
 
Since the buffer' vocabulary creates a zero-terminated buffer, we can display it using the each@ combinator and a simple quote:
 
<langsyntaxhighlight Retrolang="retro">strings [ @ "%s." puts ] ^types'STRING each@</langsyntaxhighlight>
 
=={{header|REXX}}==
===version 1===
This REXX version doesn't append a period to the last word in the list.
<langsyntaxhighlight lang="rexx">/*REXX program separates a string of comma─delimited words, and echoes them ──► terminal*/
original = 'Hello,How,Are,You,Today' /*some words separated by commas (,). */
say 'The input string:' original /*display original string ──► terminal.*/
Line 2,934 ⟶ 3,442:
say @.j || left(., j\==#) /*maybe append a period (.) to a word. */
end /*j*/ /* [↑] don't append a period if last. */
say center(' End─of─list ', 40, "═") /*display a (EOL) trailer for the list.*/</langsyntaxhighlight>
{{out|output|text=&nbsp; when using the internal default input:}}
<pre>
Line 2,952 ⟶ 3,460:
 
Hello,Betty Sue,How,Are,You,Today
<langsyntaxhighlight lang="rexx">/*REXX program to separate a string of comma-delimited words and echo */
sss='Hello,How,Are,You,Today'
say 'input string='sss
Line 2,963 ⟶ 3,471:
say word(ss,i)dot
End
say 'End-of-list.'</langsyntaxhighlight>
'''output''' is similar to REXX version 1.
 
=={{header|Ring}}==
<langsyntaxhighlight lang="ring">
see substr("Hello,How,Are,You,Today", ",", ".")
</syntaxhighlight>
</lang>
 
=={{header|RPL}}==
The program below fully complies with the task requirements, e.g. the input string is converted to a list of words, then the list is converted to a string.
{{works with|Halcyon Calc|4.2.8}}
{| class="wikitable"
! RPL code
! Comment
|-
|
"}" + "{" SWAP + STR→
1 OVER SIZE '''FOR''' j
DUP j GET →STR 2 OVER SIZE 1 - SUB j SWAP PUT
'''NEXT'''
"" 1 3 PICK SIZE '''FOR''' j
OVER j GET +
'''IF''' OVER SIZE j ≠ '''THEN''' "." + '''END'''
'''NEXT''' SWAP DROP
≫ '<span style="color:blue">'''TOKNZ'''</span>' STO
|
<span style="color:blue">'''TOKNZ'''</span> ''<span style="color:grey">( "word,word" → "word.word" )</span> ''
convert string into list (words being between quotes)
loop for each list item
convert it to a string, remove quotes at beginning and end
loop for each list item
add item to output string
if not last item, append "."
clean stack
return output string
|}
 
"Hello,How,Are,You,Today" <span style="color:blue">'''TOKNZ'''</span>
</pre>
'''Output:'''
<span style="color:grey"> 1:</span> "Hello.How.Are.You.Today"
If direct string-to-string conversion is allowed, then this one-liner for HP-48+ will do the job:
≪ 1 OVER SIZE '''FOR''' j '''IF''' DUP j DUP SUB "," == '''THEN''' j "." REPL '''END NEXT''' ≫ '<span style="color:blue">'''TOKNZ'''</span>' STO
 
=={{header|Ruby}}==
<syntaxhighlight lang="ruby">puts "Hello,How,Are,You,Today".split(',').join('.')</syntaxhighlight>
 
=={{header|Rust}}==
<langsyntaxhighlight lang="rust">fn main() {
let s = "Hello,How,Are,You,Today";
let tokens: Vec<&str> = s.split(",").collect();
println!("{}", tokens.join("."));
}</langsyntaxhighlight>
 
=={{header|S-lang}}==
<langsyntaxhighlight Slang="s-lang">variable a = strchop("Hello,How,Are,You,Today", ',', 0);
print(strjoin(a, "."));</langsyntaxhighlight>
 
{{out}}
Line 2,989 ⟶ 3,535:
 
=={{header|Scala}}==
<syntaxhighlight lang="scala">println("Hello,How,Are,You,Today" split "," mkString ".")</syntaxhighlight>
 
=={{header|Scheme}}==
{{works with|Guile}}
<langsyntaxhighlight lang="scheme">(use-modules (ice-9 regex))
(define s "Hello,How,Are,You,Today")
(define words (map match:substring (list-matches "[^,]+" s)))
Line 3,000 ⟶ 3,546:
(display (list-ref words n))
(if (< n (- (length words) 1))
(display ".")))</langsyntaxhighlight>
 
(with SRFI 13)
<langsyntaxhighlight lang="scheme">(define s "Hello,How,Are,You,Today")
(define words (string-tokenize s (char-set-complement (char-set #\,))))
(define t (string-join words "."))</langsyntaxhighlight>
 
{{works with|Gauche Scheme}}
<langsyntaxhighlight Schemelang="scheme">(print
(string-join
(string-split "Hello,How,Are,You,Today" #\,)
".")) </langsyntaxhighlight>
{{output}}
<pre>
Line 3,018 ⟶ 3,564:
 
=={{header|Seed7}}==
<langsyntaxhighlight lang="seed7">var array string: tokens is 0 times "";
 
tokens := split("Hello,How,Are,You,Today", ",");</langsyntaxhighlight>
 
=={{header|Self}}==
<langsyntaxhighlight lang="self">| s = 'Hello,How,Are,You,Today' |
((s splitOn: ',') joinUsing: '.') printLine.
</syntaxhighlight>
</lang>
 
=={{header|Sidef}}==
<langsyntaxhighlight lang="ruby">'Hello,How,Are,You,Today'.split(',').join('.').say;</langsyntaxhighlight>
=={{header|Simula}}==
<langsyntaxhighlight lang="simula">BEGIN
 
CLASS TEXTARRAY(N); INTEGER N;
Line 3,080 ⟶ 3,626:
 
END.
</syntaxhighlight>
</lang>
{{out}}
<pre>HELLO.HOW.ARE.YOU.TODAY.</pre>
 
=={{header|Slate}}==
<langsyntaxhighlight lang="slate">('Hello,How,Are,You,Today' splitWith: $,) join &separator: '.'.</langsyntaxhighlight>
 
=={{header|Slope}}==
<syntaxhighlight lang="slope">(display
(list->string
(string->list
"Hello,How,Are,You,Today"
",")
"."))</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|Smalltalk}}==
<langsyntaxhighlight lang="smalltalk">|array |
array := 'Hello,How,Are,You,Today' subStrings: $,.
array fold: [:concatenation :string | concatenation, '.', string ]</langsyntaxhighlight>
 
Some implementations also have a ''join:'' convenience method that allows the following shorter solution:
 
<langsyntaxhighlight lang="smalltalk">('Hello,How,Are,You,Today' subStrings: $,) join: '.'</langsyntaxhighlight>
 
The solution displaying a trailing period would be:
 
<langsyntaxhighlight lang="smalltalk">|array |
array := 'Hello,How,Are,You,Today' subStrings: $,.
array inject: '' into: [:concatenation :string | concatenation, string, '.' ]</langsyntaxhighlight>
 
=={{header|SNOBOL4}}==
Line 3,106 ⟶ 3,662:
For this task, it's convenient to define Perl-style split( ) and join( ) functions.
 
<langsyntaxhighlight SNOBOL4lang="snobol4"> define('split(chs,str)i,j,t,w2') :(split_end)
split t = table()
sp1 str pos(0) (break(chs) | rem) $ t<i = i + 1>
Line 3,122 ⟶ 3,678:
* # Test and display
output = join('.',split(',','Hello,How,Are,You,Today'))
end</langsyntaxhighlight>
 
{{out}}
Line 3,130 ⟶ 3,686:
 
=={{header|Standard ML}}==
<langsyntaxhighlight lang="sml">val splitter = String.tokens (fn c => c = #",");
val main = (String.concatWith ".") o splitter;</langsyntaxhighlight>
 
Test:
 
<langsyntaxhighlight lang="sml">- main "Hello,How,Are,You,Today"
val it = "Hello.How.Are.You.Today" : string</langsyntaxhighlight>
 
=={{header|Swift}}==
 
{{works with|Swift|3.x}}
<langsyntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = text.components(separatedBy: ",") // for single or multi-character separator
print(tokens)
let result = tokens.joined(separator: ".")
print(result)</syntaxhighlight>
 
{{works with|Swift|2.x}}
<syntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = text.characters.split(",").map{String($0)} // for single-character separator
print(tokens)
let result = tokens.joinWithSeparator(".")
print(result)</syntaxhighlight>
 
{{works with|Swift|1.x}}
<syntaxhighlight lang="swift">let text = "Hello,How,Are,You,Today"
let tokens = split(text, { $0 == "," }) // for single-character separator
println(tokens)
let result = ".".join(tokens)
println(result)</syntaxhighlight>
 
For multi-character separators:<syntaxhighlight lang="swift">import Foundation
 
let text = "Hello,How,Are,You,Today"
let tokens = text.componentsSeparatedByString(",")
print(tokens)</syntaxhighlight>
 
=={{header|Tcl}}==
Generating a list form a string by splitting on a comma:
<syntaxhighlight lang="tcl">split $string ","</syntaxhighlight>
 
Joining the elements of a list by a period:
<syntaxhighlight lang="tcl">join $list "."</syntaxhighlight>
 
Thus the whole thing would look like this:
<syntaxhighlight lang="tcl">puts [join [split "Hello,How,Are,You,Today" ","] "."]</syntaxhighlight>
 
If you'd like to retain the list in a variable with the name "words", it would only be marginally more complex:
<syntaxhighlight lang="tcl">puts [join [set words [split "Hello,How,Are,You,Today" ","]] "."]</syntaxhighlight>
 
(In general, the <tt>regexp</tt> command is also used in Tcl for tokenization of strings, but this example does not need that level of complexity.)
Line 3,185 ⟶ 3,741:
<code>tr</code> knows nothing about arrays, so this solution only changes each comma to a period.
 
<syntaxhighlight lang="bash">echo 'Hello,How,Are,You,Today' | tr ',' '.'</syntaxhighlight>
 
=={{header|Transd}}==
<syntaxhighlight lang="Scheme">#lang transd
 
MainModule: {
_start: (lambda locals: s "Hello,How,Are,You,Today"
(textout (join (split s ",") "."))
)
}</syntaxhighlight>
{{out}}
<pre>
Hello.How.Are.You.Today
</pre>
 
=={{header|TUSCRIPT}}==
<syntaxhighlight lang="tuscript">
$$ MODE TUSCRIPT
SET string="Hello,How,Are,You,Today"
SET string=SPLIT (string,":,:")
SET string=JOIN (string,".")
</syntaxhighlight>
 
=={{header|TXR}}==
Line 3,200 ⟶ 3,769:
sequences of non-commas.
 
<syntaxhighlight lang="txr">@(next :list "Hello,How,Are,You,Today")
@(coll)@{token /[^,]+/}@(end)
@(output)
@(rep)@token.@(last)@token@(end)
@(end)</syntaxhighlight>
 
Different approach. Collect tokens, each of
Line 3,210 ⟶ 3,779:
before a comma, or else extends to the end of the line.
 
<syntaxhighlight lang="txr">@(next :list "Hello,How,Are,You,Today")
@(coll)@(maybe)@token,@(or)@token@(end)@(end)
@(output)
@(rep)@token.@(last)@token@(end)
@(end)</syntaxhighlight>
 
Using TXR Lisp:
 
<syntaxhighlight lang="bash">txr -p '(cat-str (split-str "Hello,How,Are,You,Today" ",") ".")'
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|UNIX Shell}}==
{{works with|Bourne Shell}}
<syntaxhighlight lang="bash">string='Hello,How,Are,You,Today'
 
(IFS=,
printf '%s.' $string
echo)</syntaxhighlight>
 
----
{{works with|Bourne Again SHell}}
{{works with|Public Domain Korn SHell|5.2.14}}
<syntaxhighlight lang="bash">#! /bin/bash
stripchar-l ()
#removes the specified character from the left side of the string
Line 3,289 ⟶ 3,858:
join "$( split "$list" "$input_delimiter" )" \
"$contains_a_space" "$output_delimiter";
}</syntaxhighlight>
 
''Example''
 
<syntaxhighlight lang="bash"> strtokenize "Hello,How,Are,You,Today" "," "."
Hello.How.Are.You.Today </syntaxhighlight>
 
----
Line 3,302 ⟶ 3,871:
{{works with|ksh93}}
{{works with|zsh}}
<syntaxhighlight lang="sh">
string1="Hello,How,Are,You,Today"
elements_quantity=$(echo $string1|tr "," "\n"|wc -l)
Line 3,315 ⟶ 3,884:
 
# or to cheat
echo "Hello,How,Are,You,Today"|tr "," "."</syntaxhighlight>
 
=={{header|UnixPipes}}==
{{works with|Bourne Shell}}
<syntaxhighlight lang="bash">token() {
(IFS=, read -r A B; echo "$A".; test -n "$B" && (echo "$B" | token))
}
 
echo "Hello,How,Are,You" | token</syntaxhighlight>
 
=={{header|Ursa}}==
<syntaxhighlight lang="ursa">decl string text
set text "Hello,How,Are,You,Today"
decl string<> tokens
Line 3,333 ⟶ 3,902:
out tokens<i> "." console
end for
out endl console</syntaxhighlight>
 
=={{header|Ursala}}==
Line 3,341 ⟶ 3,910:
second order function parameterized by the delimiter. Character
literals are preceded by a backquote.
<syntaxhighlight lang="ursala">#import std
 
token_list = sep`, 'Hello,How,Are,You,Today'
Line 3,347 ⟶ 3,916:
#cast %s
 
main = mat`. token_list</syntaxhighlight>
{{out}}
<pre>
Line 3,354 ⟶ 3,923:
 
=={{header|Vala}}==
<syntaxhighlight lang="vala">void main() {
string s = "Hello,How,Are,You,Today";
print(@"$(string.joinv(".", s.split(",")))");
}</syntaxhighlight>
{{out}}
<pre>Hello.How.Are.You.Today</pre>
 
=={{header|VBA}}==
<syntaxhighlight lang="vb">Sub Main()
Dim temp() As String
temp = Tokenize("Hello,How,Are,You,Today", ",")
Line 3,374 ⟶ 3,943:
Private Sub Display(arr() As String, sep As String)
Debug.Print Join(arr, sep)
End Sub</syntaxhighlight>
{{Out}}
<pre>Hello How Are You Today</pre>
 
=={{header|VBScript}}==
<syntaxhighlight lang="vb">
s = "Hello,How,Are,You,Today"
WScript.StdOut.Write Join(Split(s,","),".")
</syntaxhighlight>
{{Out}}
<pre>Hello.How.Are.You.Today</pre>
Line 3,392 ⟶ 3,961:
The contents of each text register is then displayed to user, separated by a period.
 
<syntaxhighlight lang="vedit">Buf_Switch(Buf_Free)
Ins_Text("Hello,How,Are,You,Today")
 
Line 3,411 ⟶ 3,980:
}
 
Buf_Quit(OK)</syntaxhighlight>
 
=={{header|V (Vlang)}}==
<syntaxhighlight lang="go">// Tokenize a string, in V (Vlang)
// Tectonics: v run tokenize-a-string.v
module main
 
// starts here
pub fn main() {
println("Hello,How,Are,You,Today".split(',').join('.'))
}</syntaxhighlight>
{{out}}
<pre>prompt$ v run rosetta/tokenize-a-string.v
Hello.How.Are.You.Today</pre>
 
=={{header|WinBatch}}==
 
<syntaxhighlight lang="winbatch">text = 'Hello,How,Are,You,Today'
result = ''
BoxOpen('WinBatch Tokenizing Example', '')
Line 3,423 ⟶ 4,005:
next
display(10, 'End of Program', 'Dialog and program will close momentarily.')
BoxShut()</syntaxhighlight>
 
{{out}}
Line 3,429 ⟶ 4,011:
 
=={{header|Wortel}}==
<syntaxhighlight lang="wortel">@join "." @split "," "Hello,How,Are,You,Today"</syntaxhighlight>
Returns
<pre>"Hello.How.Are.You.Today"</pre>
 
=={{header|Wren}}==
<syntaxhighlight lang="wren">var s = "Hello,How,Are,You,Today"
var t = s.split(",").join(".") + "."
System.print(t)</syntaxhighlight>
 
{{out}}
Line 3,444 ⟶ 4,026:
 
=={{header|XPath 2.0}}==
<syntaxhighlight lang="xpath">string-join(tokenize("Hello,How,Are,You,Today", ","), ".")</syntaxhighlight>
 
{{out}}
Line 3,450 ⟶ 4,032:
 
=={{header|XPL0}}==
<syntaxhighlight lang="xpl0">string 0;
include c:\cxpl\codes;
int I, J, K, Char;
Line 3,469 ⟶ 4,051:
for K:= 4 downto 0 do [Text(0, addr Array(K,0)); ChOut(0, ^.)];
CrLf(0);
]</syntaxhighlight>
 
The 'addr' operator is used to fetch the 32-bit address of Array rather
Line 3,480 ⟶ 4,062:
 
=={{header|Yabasic}}==
<syntaxhighlight lang="yabasic">dim s$(1)
 
n = token("Hello. How are you today?", s$(), ".? ")
Line 3,488 ⟶ 4,070:
if i < n print ".";
next
print</syntaxhighlight>
 
=={{header|Zig}}==
<syntaxhighlight lang="zig">const std = @import("std");
pub fn main() void {
const string = "Hello,How,Are,You,Today";
var tokens = std.mem.split(u8, string, ",");
std.debug.print("{s}", .{tokens.next().?});
while (tokens.next()) |token| {
std.debug.print(".{s}", .{token});
}
}</syntaxhighlight>
=={{header|zkl}}==
<syntaxhighlight lang="zkl">"Hello,How,Are,You,Today".split(",").concat(".").println();
Hello.How.Are.You.Today</syntaxhighlight>
 
=={{header|Zoea}}==
<syntaxhighlight lang="zoea">
program: tokenize_a_string
input: "Hello,How,Are,You,Today"
output: "Hello.How.Are.You.Today"
</syntaxhighlight>
 
=={{header|Zoea Visual}}==
Line 3,505 ⟶ 4,097:
 
=={{header|Zsh}}==
<syntaxhighlight lang="zsh">str='Hello,How,Are,You,Today'
tokens=(${(s:,:)str})
print ${(j:.:)tokens}</syntaxhighlight>
 
Or, using SH_SPLIT_WORD:
 
<syntaxhighlight lang="zsh">str='Hello,How,Are,You,Today'
IFS=, echo ${(j:.:)${=str}}</syntaxhighlight>
 
{{omit from|PARI/GP|No real capacity for string manipulation}}
9,476

edits