# Tokenize a '+'-delimited sample string using NLTK's word tokenizer.
# NOTE(review): word_tokenize requires the NLTK 'punkt' tokenizer models to be
# downloaded (nltk.download('punkt')) — confirm the runtime environment has them.
import nltk
from nltk.tokenize import word_tokenize

# Sample text: tokens are joined with '+' (URL-query style).
text = "htms090+sebuah+keluarga+di+kampung+a+kimika+upd"

# Replace '+' with spaces so the tokenizer sees ordinary word boundaries.
text = text.replace("+", " ")

# Tokenize into a list of word strings.
tokens = word_tokenize(text)