I'm trying to close all open orders. They are all market orders, not pending orders, and they have SL and TP set to 0, so they shouldn't close on their own (they are still shown as open in the Terminal). I am storing the tickets in an array, so my loop looks like:
for(int i = 0; i < trades.size(); ++i) OrderClose(tickets[i], lots[i], Bid, Slippage);
yet I'm still receiving an "INVALID TICKET" error. Can you tell me why?
It doesn't always happen; it's like some orders get closed and some throw Invalid Ticket.
I didn't notice this behavior with only one order, so I'm assuming it occurs only when there are several of them.
Code:
template <typename T>
struct vector {
    vector() {}
    vector(int arraySize) {
        if (arraySize < 0) { arraySize *= -1; }
        ArrayResize(m_data, arraySize);
    }
    vector(vector &rhs) {
        if (size() < rhs.size()) { ArrayResize(m_data, rhs.size()); }
        for (uint i = 0; i < size(); ++i) { m_data[i] = rhs.m_data[i]; }
    }
    vector operator=(vector &rhs) {
        if (size() < rhs.size()) { ArrayResize(m_data, rhs.size()); }
        for (uint i = 0; i < size(); ++i) { m_data[i] = rhs.m_data[i]; }
        return this;
    }
    T operator[](uint index) { return m_data[index]; }
    void push_back(T value) {
        ArrayResize(m_data, ArraySize(m_data) + 1);
        m_data[ArraySize(m_data) - 1] = value;
    }
    uint size() { return ArraySize(m_data); }
    void resize(uint newSize) { ArrayResize(m_data, newSize); }
    void erase() {
        ZeroMemory(m_data);
        ArrayResize(m_data, 0);
    }
    void assign(uint index, T value) {
        m_data[index] = value;
    }
private:
    T m_data[];
};
string Buy(double lots) {
    string alertString = "";
    int __ticket;
    if ( (__ticket = OrderSend(Symbol(), OP_BUY, lots, Ask, Slippage, 0, 0, NULL, m_magic)) != -1 )
    {
        m_buyTicket.push_back( __ticket );
        m_buyLots.push_back( lots );
        m_buyPrice.push_back( Ask );
        m_buyAccel.push_back( lots / Lots );
        m_buyPos.push_back( 0 );
        alertString = "Buy function call." +
                      "\nAsk\t= " + (string)Round(Ask) +
                      "\nBid\t= " + (string)Round(Bid) +
                      "\nLots\t= " + (string)Round(lots) +
                      "\nSpread\t= " + (string)m_spread +
                      "\nID\t= " + (string)CountAll();
    }
    else {
        int _error = GetLastError();
        alertString = "Error " + (string)_error + "\n" + TranslateError( _error );
    }
    return alertString;
}
string CloseAll() {
    string alertString = "CloseAll function call.";
    // Buy closing
    for (uint i = 0; i < m_buyPrice.size(); ++i)
    {
        if ( OrderClose( m_buyTicket[i], m_buyLots[i], Bid, Slippage) )
        {
            alertString += "\nBuy " + (string)(i+1) + " closed with profit " +
                           (string)Shrink( (Bid - m_buyPrice[i]) * m_buyAccel[i] );
        }
        else
        {
            int _error = GetLastError();
            alertString += "\nError " + (string)_error + "\n" + TranslateError( _error ) +
                           "\n(while closing Buy " + (string)(i+1) + ")";
        }
    }
    // Sell closing
    for (uint i = 0; i < m_sellPrice.size(); ++i)
    {
        if ( OrderClose( m_sellTicket[i], m_sellLots[i], Ask, Slippage) )
        {
            alertString += "\nSell " + (string)(i+1) + " closed with profit " +
                           (string)Shrink( (m_sellPrice[i] - Ask) * m_sellAccel[i] );
        }
        else
        {
            int _error = GetLastError();
            alertString += "\nError " + (string)_error + "\n" + TranslateError( _error ) +
                           "\n(while closing Sell " + (string)(i+1) + ")";
        }
    }
    return alertString;
}
When you close a ticket successfully, you do not remove it from the list of trades, right? It seems you should: use your struct to remove the element once OrderClose succeeds, otherwise the next call runs over tickets that are already closed and gets exactly this Invalid Ticket error.
By the way, there's probably no need to reinvent the wheel: use CArrayObj as a container for your elements and delete them once the order is closed.
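A minimal sketch of that idea, assuming the same parallel arrays as in your Buy()/CloseAll() code and a hypothetical remove_at() helper added to the vector struct (the helper name is just for illustration):

// inside struct vector: drop the element at 'index' by shifting the tail left
void remove_at(uint index) {
    for (uint i = index; i + 1 < size(); ++i) { m_data[i] = m_data[i + 1]; }
    ArrayResize(m_data, ArraySize(m_data) - 1);
}

// in CloseAll(): walk backwards so removals don't shift the entries still to be visited
for (int i = (int)m_buyTicket.size() - 1; i >= 0; --i)
{
    if ( OrderClose( m_buyTicket[i], m_buyLots[i], Bid, Slippage) )
    {
        m_buyTicket.remove_at(i);
        m_buyLots.remove_at(i);
        m_buyPrice.remove_at(i);
        m_buyAccel.remove_at(i);
        m_buyPos.remove_at(i);
    }
}

Keeping five parallel arrays in sync like this is easy to get wrong, which is why a single container of trade objects (the CArrayObj route above) is the tidier design.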
My project needs to use mksquashfs to pack some content into an image; when the content changes, I need to replace the image file with the new one.
Before using squashfs I used a tarball + md5sum: when the content changed, the md5 changed.
But the image file mksquashfs produces changes every time, even when the content is the same.
I've walked through all the parameters of mksquashfs trying to find the cause, without luck so far.
There is a modified mksquashfs on GitHub:
https://github.com/dannyp11/squashfs4.3.git
This fork adds a -no-date option that makes mksquashfs create the squash file without the creation-date info. Thus, if the content stays the same, the resulting squash file has the same checksum.
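For example (a hypothetical invocation; the paths are placeholders, and the patched binary has to be the mksquashfs you run):

mksquashfs /path/to/rootfs image.squashfs -no-date
md5sum image.squashfs   # rebuilding from identical content now gives an identical checksum

Note that, per the README hunk below, -no-date also activates -noappend and -no-fragments. The relevant parts of the fork's diff: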
diff -ruNa squashfs4.3/README squashfs4.3-new/README
--- squashfs4.3/README 2014-05-12 13:17:59.000000000 -0700
+++ squashfs4.3-new/README 2017-08-01 23:05:44.428051586 -0700
@@ -127,6 +127,9 @@
-keep-as-directory if one source directory is specified, create a root
directory containing that directory, rather than the
contents of the directory
+-no-date do not store the date in the squash file
+ (-noappend and -no-fragments is also activated)
+ this way, the squash always has the same cksum
Filesystem filter options:
-p <pseudo-definition> Add pseudo file definition
diff -ruNa squashfs4.3/squashfs-tools/mksquashfs.c squashfs4.3-new/squashfs-tools/mksquashfs.c
--- squashfs4.3/squashfs-tools/mksquashfs.c 2014-05-12 15:18:20.000000000 -0700
+++ squashfs4.3-new/squashfs-tools/mksquashfs.c 2017-08-01 21:26:03.092224142 -0700
@@ -98,6 +98,7 @@
int use_regex = FALSE;
int nopad = FALSE;
int exit_on_error = FALSE;
+int no_date = FALSE;
long long global_uid = -1, global_gid = -1;
@@ -3139,7 +3140,7 @@
buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
buf.st_uid = getuid();
buf.st_gid = getgid();
- buf.st_mtime = time(NULL);
+ buf.st_mtime = (!no_date)? time(NULL) : 0;
buf.st_dev = 0;
buf.st_ino = 0;
dir_ent->inode = lookup_inode2(&buf, PSEUDO_FILE_OTHER, 0);
@@ -3526,7 +3527,7 @@
buf.st_gid = pseudo_ent->dev->gid;
buf.st_rdev = makedev(pseudo_ent->dev->major,
pseudo_ent->dev->minor);
- buf.st_mtime = time(NULL);
+ buf.st_mtime = (!no_date)? time(NULL) : 0;
buf.st_ino = pseudo_ino ++;
if(pseudo_ent->dev->type == 'd') {
@@ -5262,6 +5263,12 @@
else if(strcmp(argv[i], "-keep-as-directory") == 0)
keep_as_directory = TRUE;
+ else if (strcmp(argv[i], "-no-date") == 0) {
+ no_date = TRUE;
+ delete = TRUE;
+ no_fragments = TRUE;
+ }
+
else if(strcmp(argv[i], "-exit-on-error") == 0)
exit_on_error = TRUE;
@@ -5315,6 +5322,9 @@
ERROR("\t\t\tdirectory containing that directory, "
"rather than the\n");
ERROR("\t\t\tcontents of the directory\n");
+ ERROR("-no-date\t\tdo not store the date in the squash file\n");
+ ERROR("\t\t\t(-noappend and -no-fragments is also activated)\n");
+ ERROR("\t\t\tthis way, the squash always has the same cksum\n");
ERROR("\nFilesystem filter options:\n");
ERROR("-p <pseudo-definition>\tAdd pseudo file "
"definition\n");
@@ -5717,7 +5727,7 @@
sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, no_fragments,
always_use_fragments, duplicate_checking, exportable,
no_xattrs, comp_opts);
- sBlk.mkfs_time = time(NULL);
+ sBlk.mkfs_time = (!no_date) ? time(NULL) : 0;
disable_info();
I have the same question; after some searching, I found this:
#32 Request to add a deterministic behavior to mksquashfs
Here is a patch.
The patch adds a "-deterministic" option.
Here is what the option does:
- it forces the build to a single processor
- it forces the date globally. The forced date is printed out and must be reused with the "-forcedate" option in order to reproduce the same squashfs image.
- it sorts the entries of a directory as they are read.
- it delays the start of the fragment deflator thread. When this late start is active, the queue to the fragment deflator thread uses a queue implementation with unlimited size.
Directory entries must be sorted because inode numbers are assigned in the order the files are discovered ("lookup_inode()" in "dir_scan1()").
The format of the date after "-forcedate" is the number of seconds since the Epoch: the output of date '+%s'.
With this patch, I can copy a tree of files, use tar, or repack a mounted squashfs, and always get the same squashfs file.
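For example, a reproducible build with this patch could look like the following sketch (the paths are placeholders; the epoch value just has to be identical on every run):

epoch=$(date '+%s')   # pick it once and reuse it for every rebuild
mksquashfs /path/to/rootfs image.squashfs -deterministic -forcedate "$epoch"
md5sum image.squashfs   # same content + same -forcedate => same checksum

The patch itself: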
--- a/squashfs-tools/mksquashfs.c
+++ b/squashfs-tools/mksquashfs.c
@@ -113,6 +113,9 @@
#include "compressor.h"
#include "xattr.h"
+int forcedate = -1;
+int sort_dir_entry = 0;
+
int delete = FALSE;
int fd;
int cur_uncompressed = 0, estimated_uncompressed = 0;
@@ -342,6 +345,18 @@ struct queue {
void **data;
};
+/* struct describing queues used to pass data between threads */
+struct unlimited_queue {
+ struct unlimited_queue_node *first;
+ struct unlimited_queue_node *last;
+ pthread_mutex_t mutex;
+ pthread_cond_t any_event;
+};
+
+struct unlimited_queue_node {
+ void *data;
+ struct unlimited_queue_node* next;
+};
/* in memory uid tables */
#define ID_ENTRIES 256
@@ -362,6 +377,8 @@ unsigned int sid_count = 0, suid_count = 0, sguid_count = 0;
struct cache *reader_buffer, *writer_buffer, *fragment_buffer;
struct queue *to_reader, *from_reader, *to_writer, *from_writer, *from_deflate,
*to_frag;
+struct unlimited_queue *to_frag_unlimited = NULL;
+
pthread_t *thread, *deflator_thread, *frag_deflator_thread, progress_thread;
pthread_mutex_t fragment_mutex;
pthread_cond_t fragment_waiting;
@@ -488,6 +505,93 @@ void *queue_get(struct queue *queue)
return data;
}
+void queue_waitempty(struct queue *queue)
+{
+ pthread_mutex_lock(&queue->mutex);
+
+ while(queue->readp != queue->writep)
+ pthread_cond_wait(&queue->full, &queue->mutex);
+
+ pthread_cond_signal(&queue->full);
+ pthread_mutex_unlock(&queue->mutex);
+}
+
+struct unlimited_queue *unlimited_queue_init()
+{
+ struct unlimited_queue *queue = malloc(sizeof(struct unlimited_queue));
+
+ if(queue == NULL)
+ goto failed;
+
+ queue->first = NULL;
+ queue->last = NULL;
+ pthread_mutex_init(&queue->mutex, NULL);
+ pthread_cond_init(&queue->any_event, NULL);
+
+ return queue;
+
+failed:
+ BAD_ERROR("Out of memory in queue_init\n");
+}
+
+void unlimited_queue_put(struct unlimited_queue *queue, void *data)
+{
+ pthread_mutex_lock(&queue->mutex);
+
+ struct unlimited_queue_node *newNode = malloc(sizeof(struct unlimited_queue_node));
+ if(newNode == NULL){
+ BAD_ERROR("Out of memory in queue_init\n");
+ }
+
+ newNode->data = data;
+ newNode->next = NULL;
+
+ pthread_mutex_unlock(&queue->mutex);
+
+ if(queue->first == NULL){
+ queue->first = newNode;
+ } else {
+ queue->last->next = newNode;
+ }
+ queue->last = newNode;
+ pthread_cond_broadcast(&queue->any_event);
+ pthread_mutex_unlock(&queue->mutex);
+}
+
+void *unlimited_queue_get(struct unlimited_queue *queue)
+{
+ void *data;
+
+ pthread_mutex_lock(&queue->mutex);
+
+ while(queue->first == NULL)
+ pthread_cond_wait(&queue->any_event, &queue->mutex);
+
+ data = queue->first->data;
+ struct unlimited_queue_node* newFirst = queue->first->next;
+ free(queue->first);
+ queue->first = newFirst;
+/* eventually
+ if(newFirst == NULL){
+ queue->last == NULL;
+ }
+*/
+
+ pthread_cond_broadcast(&queue->any_event);
+ pthread_mutex_unlock(&queue->mutex);
+
+ return data;
+}
+
+void unlimited_queue_waitempty(struct unlimited_queue *queue)
+{
+ pthread_mutex_lock(&queue->mutex);
+
+ while(queue->first != NULL)
+ pthread_cond_wait(&queue->any_event, &queue->mutex);
+
+ pthread_mutex_unlock(&queue->mutex);
+}
/* Cache status struct. Caches are used to keep
track of memory buffers passed between different threads */
@@ -1852,7 +1956,11 @@ void write_fragment()
fragment_data->block = fragments;
fragment_table[fragments].unused = 0;
fragments_outstanding ++;
- queue_put(to_frag, fragment_data);
+ if(to_frag_unlimited){
+ unlimited_queue_put(to_frag_unlimited, fragment_data);
+ }else{
+ queue_put(to_frag, fragment_data);
+ }
fragments ++;
fragment_size = 0;
pthread_mutex_unlock(&fragment_mutex);
@@ -2613,8 +2721,15 @@ void *frag_deflator(void *arg)
BAD_ERROR("frag_deflator:: compressor_init failed\n");
while(1) {
+ struct file_buffer *file_buffer = NULL;
+
+ if(to_frag_unlimited){
+ file_buffer = unlimited_queue_get(to_frag_unlimited);
+ }else{
+ file_buffer = queue_get(to_frag);
+ }
+
int c_byte, compressed_size;
- struct file_buffer *file_buffer = queue_get(to_frag);
struct file_buffer *write_buffer =
cache_get(writer_buffer, file_buffer->block +
FRAG_INDEX, 1);
@@ -3594,7 +3709,11 @@ void dir_scan(squashfs_inode *inode, char *pathname,
buf.st_mode = S_IRWXU | S_IRWXG | S_IRWXO | S_IFDIR;
buf.st_uid = getuid();
buf.st_gid = getgid();
- buf.st_mtime = time(NULL);
+ if(forcedate != -1){
+ buf.st_mtime = forcedate;
+ }else{
+ buf.st_mtime = time(NULL);
+ }
buf.st_dev = 0;
buf.st_ino = 0;
dir_ent->inode = lookup_inode(&buf);
@@ -3634,11 +3753,23 @@ void dir_scan(squashfs_inode *inode, char *pathname,
dir_ent->inode->type = SQUASHFS_DIR_TYPE;
}
+struct file_dir_name{
+ char* filename;
+ char* dirname;
+};
+
+int compare_file_dir_name(const void* a, const void* b){
+ return strcmp((*(struct file_dir_name**)a)->filename, (*(struct file_dir_name**)b)->filename);
+}
+
+#define BASE_ALLOC_INCREMENT 128
struct dir_info *dir_scan1(char *pathname, struct pathnames *paths,
int (_readdir)(char *, char *, struct dir_info *))
{
- char filename[8192], dir_name[8192];
+ char filename2[8192], dir_name2[8192];
+ char *filename = filename2;
+ char *dir_name = dir_name2;
struct dir_info *dir = scan1_opendir(pathname);
if(dir == NULL) {
@@ -3646,18 +3777,69 @@ struct dir_info *dir_scan1(char *pathname, struct pathnames *paths,
goto error;
}
- while(_readdir(filename, dir_name, dir) != FALSE) {
+ int base_count = 0;
+ void** base = NULL;
+
+ if(sort_dir_entry){
+ int base_allocated = BASE_ALLOC_INCREMENT;
+ base = malloc(base_allocated * sizeof(void*));
+
+ while(_readdir(filename2, dir_name2, dir) != FALSE) {
+ struct file_dir_name* new_element = malloc(sizeof(struct file_dir_name));
+ if(new_element == NULL){
+ BAD_ERROR("No more Memory !\n");
+ }
+ new_element->filename = strdup(filename2);
+ if(new_element->filename == NULL){
+ BAD_ERROR("No more Memory !\n");
+ }
+ new_element->dirname = strdup(dir_name2);
+ if(new_element->dirname == NULL){
+ BAD_ERROR("No more Memory !\n");
+ }
+
+ if(base_count >= base_allocated){
+ base_allocated += BASE_ALLOC_INCREMENT;
+ base = realloc(base, base_allocated * sizeof(void*));
+ if(base == NULL){
+ BAD_ERROR("No more Memory !\n");
+ }
+ }
+
+ base[base_count] = new_element;
+ ++base_count;
+ }
+
+ qsort(base, base_count, sizeof(void*), compare_file_dir_name);
+ }
+
+ int i = 0;
+
+ while(1){
+ if(sort_dir_entry){
+ if(i >= base_count){
+ break;
+ }
+ struct file_dir_name* cur_element = (struct file_dir_name*)base[i];
+ dir_name = cur_element->dirname;
+ filename = cur_element->filename;
+ }else{
+ if(_readdir(filename, dir_name, dir) == FALSE) {
+ break;
+ }
+ }
+
struct dir_info *sub_dir;
struct stat buf;
struct pathnames *new;
if(strcmp(dir_name, ".") == 0 || strcmp(dir_name, "..") == 0)
- continue;
+ goto loop_continue;
if(lstat(filename, &buf) == -1) {
ERROR("Cannot stat dir/file %s because %s, ignoring",
filename, strerror(errno));
- continue;
+ goto loop_continue;
}
if((buf.st_mode & S_IFMT) != S_IFREG &&
@@ -3669,29 +3851,40 @@ struct dir_info *dir_scan1(char *pathname, struct pathnames *paths,
(buf.st_mode & S_IFMT) != S_IFSOCK) {
ERROR("File %s has unrecognised filetype %d, ignoring"
"\n", filename, buf.st_mode & S_IFMT);
- continue;
+ goto loop_continue;
}
if(old_exclude) {
if(old_excluded(filename, &buf))
- continue;
+ goto loop_continue;
} else {
if(excluded(paths, dir_name, &new))
- continue;
+ goto loop_continue;
}
if((buf.st_mode & S_IFMT) == S_IFDIR) {
sub_dir = dir_scan1(filename, new, scan1_readdir);
if(sub_dir == NULL)
- continue;
+ goto loop_continue;
dir->directory_count ++;
} else
sub_dir = NULL;
add_dir_entry(dir_name, filename, sub_dir, lookup_inode(&buf),
dir);
+ loop_continue:
+ if(sort_dir_entry){
+ free(dir_name);
+ free(filename);
+ free(base[i]);
+ base[i] = NULL;
+ i++;
+ }
}
+ if(sort_dir_entry){
+ free(base);
+ }
scan1_freedir(dir);
error:
@@ -3777,7 +3970,11 @@ struct dir_info *dir_scan2(struct dir_info *dir, struct pseudo *pseudo)
buf.st_gid = pseudo_ent->dev->gid;
buf.st_rdev = makedev(pseudo_ent->dev->major,
pseudo_ent->dev->minor);
- buf.st_mtime = time(NULL);
+ if(forcedate != -1){
+ buf.st_mtime = forcedate;
+ }else{
+ buf.st_mtime = time(NULL);
+ }
buf.st_ino = pseudo_ino ++;
if(pseudo_ent->dev->type == 'f') {
@@ -4039,6 +4236,7 @@ void add_old_root_entry(char *name, squashfs_inode inode, int inode_number,
old_root_entry[old_root_entries++].inode.root_entry = TRUE;
}
+int late_fragment_thread_start = 0;
void initialise_threads(int readb_mbytes, int writeb_mbytes,
int fragmentb_mbytes)
@@ -4046,7 +4244,7 @@ void initialise_threads(int readb_mbytes, int writeb_mbytes,
int i;
sigset_t sigmask, old_mask;
int reader_buffer_size = readb_mbytes << (20 - block_log);
- int fragment_buffer_size = fragmentb_mbytes << (20 - block_log);
+ int fragment_buffer_size = (fragmentb_mbytes == -1 ? -1 : fragmentb_mbytes << (20 - block_log));
/*
* writer_buffer_size is global because it is needed in
@@ -4095,10 +4293,16 @@ void initialise_threads(int readb_mbytes, int writeb_mbytes,
to_writer = queue_init(writer_buffer_size);
from_writer = queue_init(1);
from_deflate = queue_init(reader_buffer_size);
- to_frag = queue_init(fragment_buffer_size);
+
+ if(fragment_buffer_size == -1){
+ to_frag_unlimited = unlimited_queue_init();
+ }else{
+ to_frag = queue_init(fragment_buffer_size);
+ }
+
reader_buffer = cache_init(block_size, reader_buffer_size);
writer_buffer = cache_init(block_size, writer_buffer_size);
- fragment_buffer = cache_init(block_size, fragment_buffer_size);
+ fragment_buffer = cache_init(block_size, fragment_buffer_size != -1 ? fragment_buffer_size : FRAGMENT_BUFFER_DEFAULT << (20 - block_log));
pthread_create(&thread[0], NULL, reader, NULL);
pthread_create(&thread[1], NULL, writer, NULL);
pthread_create(&progress_thread, NULL, progress_thrd, NULL);
@@ -4109,9 +4313,11 @@ void initialise_threads(int readb_mbytes, int writeb_mbytes,
if(pthread_create(&deflator_thread[i], NULL, deflator, NULL) !=
0)
BAD_ERROR("Failed to create thread\n");
- if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator,
- NULL) != 0)
- BAD_ERROR("Failed to create thread\n");
+ if(!late_fragment_thread_start){
+ if(pthread_create(&frag_deflator_thread[i], NULL, frag_deflator,
+ NULL) != 0)
+ BAD_ERROR("Failed to create thread\n");
+ }
}
printf("Parallel mksquashfs: Using %d processor%s\n", processors,
@@ -4701,6 +4907,31 @@ int main(int argc, char *argv[])
"megabyte or larger\n", argv[0]);
exit(1);
}
+ } else if(strcmp(argv[i], "-forcedate") == 0) {
+ if(++i == argc) {
+ ERROR("%s: -forcedate missing fake date\n", argv[0]);
+ exit(1);
+ }
+ forcedate = strtol(argv[i], &b, 10);
+ if(*b != '\0') {
+ ERROR("%s: -forcedate invalid parameter\n", argv[0]);
+ exit(1);
+ }
+ } else if(strcmp(argv[i], "-deterministic") == 0) {
+ if(forcedate == -1){
+ forcedate = time(NULL);
+ }
+ processors = 1;
+ late_fragment_thread_start = 1;
+ fragmentb_mbytes = -1;
+ if(sort_dir_entry == 0){
+ sort_dir_entry = 1;
+ }
+ } else if(strcmp(argv[i], "-late-fragment-thread-start") == 0) {
+ late_fragment_thread_start = 1;
+ fragmentb_mbytes = -1;
+ } else if(strcmp(argv[i], "-unlimited-fragment-queue") == 0) {
+ fragmentb_mbytes = -1;
} else if(strcmp(argv[i], "-b") == 0) {
if(++i == argc) {
ERROR("%s: -b missing block size\n", argv[0]);
@@ -5292,7 +5523,11 @@ printOptions:
sBlk.flags = SQUASHFS_MKFLAGS(noI, noD, noF, noX, no_fragments,
always_use_fragments, duplicate_checking, exportable,
no_xattrs, comp_opts);
- sBlk.mkfs_time = time(NULL);
+ if(forcedate != -1){
+ sBlk.mkfs_time = forcedate;
+ } else {
+ sBlk.mkfs_time = time(NULL);
+ }
restore_filesystem:
if(progress && estimated_uncompressed) {
@@ -5301,6 +5536,14 @@ restore_filesystem:
}
write_fragment();
+
+ if(late_fragment_thread_start){
+ queue_waitempty(from_reader);
+ if(pthread_create(&frag_deflator_thread[0], NULL, frag_deflator,
+ NULL) != 0)
+ BAD_ERROR("Failed to create thread\n");
+ }
+
sBlk.fragments = fragments;
if(!restoring) {
unlock_fragments();
@@ -5340,7 +5583,8 @@ restore_filesystem:
write_destination(fd, SQUASHFS_START, sizeof(sBlk), &sBlk);
if(!nopad && (i = bytes & (4096 - 1))) {
- char temp[4096] = {0};
+ char temp[4096];
+ memset(&temp, 0, 4096);
write_destination(fd, bytes, 4096 - i, temp);
}
@@ -5401,6 +5645,9 @@ restore_filesystem:
printf("Number of directories %d\n", dir_count);
printf("Number of ids (unique uids + gids) %d\n", id_count);
printf("Number of uids %d\n", uid_count);
+ if(forcedate != -1){
+ printf("forcedate was %d\n", forcedate);
+ }
for(i = 0; i < id_count; i++) {
if(id_table[i]->flags & ISA_UID) {
This ActionScript code I have been working on for a few days works 100% fine in JavaScript, but when I try to compile it as ActionScript it complains about unexpected /, ), and } symbols. Is this syntax wrong, and if so, how should I fix it? I figured I could test it as JavaScript for quicker iteration using http://jsfiddle.net/ but now I'm like =(
var txt = "This is a [rainbow]test to show that I can[/rainbow] make whatever I want [rainbow]appear as a rainbow[/rainbow] because I am [rainbow]awesome[/rainbow].";
if ((txt.indexOf("[rainbow]") > -1) && (txt.indexOf("[/rainbow]") > -1)) {
    var colors = ['f0f', 'f0c', 'f09', 'f06', 'f03', 'f00', 'f30', 'f60', 'f90', 'fc0', 'ff0', 'cf0', '9f0', '6f0', '3f0', '0f0', '0f3', '0f6', '0f9', '0fc', '0ff', '0cf', '09f', '06f', '03f', '00f', '30f', '60f', '90f', 'c0f'];
    function rainbowify(text) {
        return text.replace(/\[rainbow\](.+?)\[\/rainbow\]/g, function(_, inner) {
            return inner.replace(/./g, function(ch, i) {
                return '<font color="#' + colors[i % colors.length] + '">' + ch + '</font>';
            });
        });
    }
    txt = rainbowify(txt);
    document.write(txt);
}
Well, this is it. If this is ActionScript 2, there is no built-in regular-expression support, so the /regex/ literals and the callback functions passed to replace are exactly where the compiler complains; the version below does the same rainbow markup with plain string operations:
txt = txt.replace("'", "#");
if ((txt.indexOf("[rainbow]") > -1) && (txt.indexOf("[/rainbow]") > -1)) {
    var firstChar = txt.indexOf("[rainbow]") + 9;
    var lastChar = txt.indexOf("[/rainbow]");
    while (lastChar <= txt.lastIndexOf("[/rainbow]")) {
        var RAINBOWTEXT = '';
        var i = firstChar;
        while (i < lastChar) {
            RAINBOWTEXT += txt.charAt(i);
            i++;
        }
        var text = RAINBOWTEXT;
        var texty = '';
        colors = new Array('ff00ff','ff00cc','ff0099','ff0066','ff0033','ff0000','ff3300','ff6600','ff9900','ffcc00','ffff00','ccff00','99ff00','66ff00','33ff00','00ff00','00ff33','00ff66','00ff99','00ffcc','00ffff','00ccff','0099ff','0066ff','0033ff','0000ff','3300ff','6600ff','9900ff','cc00ff');
        i = 0;
        while (i <= text.length) {
            var t = text.charAt(i);
            if (t != undefined) {
                texty += "<font color=\"#" + colors[i % colors.length] + "\">" + t + "</font>";
                i++;
            }
        }
        texty = texty.replace("> <", "> <");
        var REPLACEME = "[rainbow]" + RAINBOWTEXT + "[/rainbow]";
        txt = txt.replace(REPLACEME, texty);
        if (lastChar == txt.lastIndexOf("[/rainbow]")) {
            break;
        }
        nextChar = lastChar + 10;
        firstChar = txt.indexOf("[rainbow]", lastChar) + 9;
        lastChar = txt.indexOf("[/rainbow]", lastChar);
    }
}
txt = txt.replace("#", "'");