Getting the full linked list without using a loop? - linked-list

LinkedListA = 3->4->5
LinkedListB = 12->6->9
I am simply trying to append linkedListB to the end of linkedListA.
What I cannot figure out is why the final while loop is able to print
the complete linkedListA with all the nodes added from linkedListB!
public static void joinLists(Node headA, Node headB)
{
    Node currentA = headA;
    Node currentB = headB;
    while (currentA.nextLink != null)
    {
        currentA = currentA.nextLink;
    }
    Node newElement = currentB;
    currentA.nextLink = newElement; // no loop here, as you can see, to keep appending nodes as currentB advances
    currentB = currentB.nextLink;
    currentA = headA;
    while (currentA != null)
    {
        System.out.println(currentA.data);
        currentA = currentA.nextLink; // output 3->4->5->12->6->9 How!?
    }
}
My initial logic was simply this:
public static void joinLists(Node headA, Node headB)
{
    Node currentA = headA;
    Node currentB = headB;
    while (currentB != null)
    {
        currentA = head;
        while (currentA.nextLink != null)
        {
            currentA = currentA.nextLink;
        }
        Node newElement = currentB;
        currentA.nextLink = newElement;
        currentB = currentB.nextLink;
    }
    currentA = headA;
    while (currentA != null)
    {
        System.out.println(currentA.data);
        currentA = currentA.nextLink;
    }
}
But this doesn't seem to work!
Before we get to that, though, tell me how the first code manages to work?

You made the last node in A (the 5) point to the first node in B (the 12), which corresponds exactly to your output. You don't need a loop because the connections are distributed: each node only knows where the next node is. In attaching B to the end of A, only one link changes: the one you changed.

The first loop appends the list headB to the end of the list headA.
public static Node joinLists(Node headA, Node headB)
{
    if (headA == null)
    {
        headA = headB;
    }
    else
    {
        Node currentA = headA;
        while (currentA.nextLink != null)
        {
            currentA = currentA.nextLink;
        }
        currentA.nextLink = headB;
    }
    Node current = headA;
    while (current != null)
    {
        System.out.println(current.data);
        current = current.nextLink;
    }
    return headA;
}
Then the printing loop works.
In your second snippet you tried something that doesn't compile (currentA = head; — there is no variable named head, only the parameter headA).
Fewer variables make the code easier to follow, as shown here.
Callers must use the return value for the joined list, because headA could be null.

Linked lists work by reference: each node can be stored anywhere in memory, and the nodes are linked by storing the address of the next node in the Node.next (here nextLink) field.
In the first code:
Node newElement = currentB;
currentA.nextLink = newElement;
currentB = currentB.nextLink;
you are pointing the last element of LinkedListA directly at headB, which is exactly the same mechanism that connects the nodes within a single list.
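To make the reference semantics concrete, here is a minimal, runnable Java sketch (the Node class is a stand-in matching the fields used in the question, which isn't shown): a single assignment to the last node's nextLink makes every node of B reachable from A.

class Node
{
    int data;
    Node nextLink;
    Node(int data) { this.data = data; }
}

public class JoinDemo
{
    public static void main(String[] args)
    {
        // build A = 3->4->5 and B = 12->6->9
        Node headA = new Node(3);
        headA.nextLink = new Node(4);
        headA.nextLink.nextLink = new Node(5);
        Node headB = new Node(12);
        headB.nextLink = new Node(6);
        headB.nextLink.nextLink = new Node(9);
        // walk to the last node of A
        Node tail = headA;
        while (tail.nextLink != null)
        {
            tail = tail.nextLink;
        }
        // one assignment: A's tail now points at B's first node,
        // and B's own links already chain 12->6->9
        tail.nextLink = headB;
        for (Node n = headA; n != null; n = n.nextLink)
        {
            System.out.println(n.data); // prints 3 4 5 12 6 9
        }
    }
}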

Related

Memory leaks in linked lists

I have recently tried to implement a concurrent sorted linked list in C. The only problem is that I have not used C in a long time, and although my code works fine, valgrind shows a memory leak in the insertion function that adds a node to the linked list. I allocated the new node at the beginning of the function and carefully followed every use of it, yet I have not found where the leak occurs. Any help finding it is appreciated. I will post the insertion function, and more code if someone asks for it.
Code :
struct node {
    int value;
    node* next;
    pthread_mutex_t lock; // hand over hand implies a lock for every node
};
struct list {
    node* head;
};
// insert function for a new value; if the value already exists then do nothing
void insert_value(list* list, int value)
{
if(list == NULL) return; // if list pointer is NULL then do nothing
node* new_node = malloc(sizeof(node)); // create new node
if(new_node == NULL) return; // check if allocation fails
new_node->value = value;
pthread_mutex_init(&(new_node->lock),NULL); // initialize mutex lock
// for the new node
pthread_mutex_lock(&(new_node->lock)); // lock the new node
if(list->head == NULL) // new node is the first element in the list
{
new_node->next = NULL;
list->head = new_node;
pthread_mutex_unlock(&(new_node->lock));
return;
}
pthread_mutex_lock(&(list->head->lock)); // lock the head of the list
node* temp;
if(list->head->value == new_node->value) // do nothing if the head of
// list is equal to new node
{
pthread_mutex_unlock(&(list->head->lock));
pthread_mutex_unlock(&(new_node->lock));
pthread_mutex_destroy(&(new_node->lock));
free(new_node);
return;
}
else if(list->head->value > new_node->value) // new node comes before
// the list head
{
new_node->next = list->head;
temp = list->head;
list->head = new_node;
pthread_mutex_unlock(&(list->head->lock));
pthread_mutex_unlock(&(temp->lock));
return;
}
else
{
// Locate the node before the point of insertion //
node* dummy;
node* current = list->head;
temp = current->next;
if(temp == NULL) // new node comes after
// the list head
{
new_node->next = current->next;
current->next = new_node;
pthread_mutex_unlock(&(new_node->lock));
pthread_mutex_unlock(&(current->lock));
return;
}
pthread_mutex_lock(&(temp->lock)); // lock the successor of the head
// perform hand over hand traversal of the list
// and check if temp reaches the end of the list (NULL)
while (temp->value < new_node->value)
{
pthread_mutex_unlock(&(current->lock));
current = temp;
temp = temp->next;
if(temp == NULL) break;
pthread_mutex_lock(&(temp->lock));
}
if(temp == NULL) // new node will be the last element in this case
{
current->next = new_node;
new_node->next = NULL;
pthread_mutex_unlock(&(current->lock));
pthread_mutex_unlock(&(new_node->lock));
return;
}
else if(temp->value == new_node->value) //new node already exists
{
pthread_mutex_unlock(&(temp->lock));
pthread_mutex_unlock(&(current->lock));
pthread_mutex_unlock(&(new_node->lock));
pthread_mutex_destroy(&(new_node->lock));
free(new_node);
return;
}
else // new node should be inserted inside the list
{
dummy = temp;
new_node->next = current->next;
current->next = new_node;
pthread_mutex_unlock(&(dummy->lock));
pthread_mutex_unlock(&(current->lock));
pthread_mutex_unlock(&(new_node->lock));
return;
}
}
}

A star algorithm with multiple goals [duplicate]

Let's consider a simple grid where any point is connected to at most 4 other points (north-east-west-south neighborhood).
I have to write a program that computes the minimal route from a selected initial point to any of the goal points, which are connected (there is a route consisting of goal points between any two goals). Of course there can be obstacles on the grid.
My solution is quite simple: I'm using the A* algorithm with a variable heuristic function h(x), the Manhattan distance from x to the nearest goal point. To find the nearest goal point I have to do a linear search (O(n), where n is the number of goal points). Here is my question: is there a more efficient solution (heuristic function) to dynamically find the nearest goal point, in time better than O(n)?
Or maybe A* is not a good way to solve this problem?
How many goals are there, tens or thousands? If tens, your approach will work fine; if thousands, then nearest neighbor search will give you ideas on setting up your data so it can be searched quickly.
The tradeoffs are obvious: spatially organizing your data for search takes time, and on small sets brute force is simpler to maintain. Since you're evaluating the heuristic constantly, I think structuring the data will be worthwhile at very low numbers of points.
An alternate way to do this would be a modified flood fill algorithm that stops once it reaches a destination point during the flood.
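To illustrate the flood-fill idea, here is a minimal sketch, assuming a boolean obstacle grid, a 4-neighborhood, and goal cells encoded as row*cols+col keys (illustrative choices, not from the question): a plain BFS expands outward from the start and stops at the first goal it reaches, which on a uniform-cost grid is the nearest one.

import java.util.ArrayDeque;
import java.util.Set;

public class FloodFillSearch
{
    // returns the minimal number of steps from (sr, sc) to the nearest goal, or -1
    static int distanceToNearestGoal(boolean[][] blocked, int sr, int sc, Set<Long> goals)
    {
        int rows = blocked.length, cols = blocked[0].length;
        int[] dr = {1, -1, 0, 0};
        int[] dc = {0, 0, 1, -1};
        boolean[][] seen = new boolean[rows][cols];
        ArrayDeque<int[]> queue = new ArrayDeque<>();
        queue.add(new int[]{sr, sc, 0});
        seen[sr][sc] = true;
        while (!queue.isEmpty())
        {
            int[] cur = queue.poll();
            if (goals.contains((long) cur[0] * cols + cur[1]))
            {
                return cur[2]; // BFS reaches the nearest goal first
            }
            for (int k = 0; k < 4; k++)
            {
                int nr = cur[0] + dr[k], nc = cur[1] + dc[k];
                if (nr >= 0 && nr < rows && nc >= 0 && nc < cols
                        && !blocked[nr][nc] && !seen[nr][nc])
                {
                    seen[nr][nc] = true;
                    queue.add(new int[]{nr, nc, cur[2] + 1});
                }
            }
        }
        return -1; // no goal reachable
    }
}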
First, decide whether you need to optimize, because any optimization is going to complicate your code, and for a small number of goals, your current solution is probably fine for a simple heuristic like Manhattan distance.
Before taking the first step, compute the heuristic for each goal. Remember the nearest goal as the currently selected goal, and move toward it, but subtract the maximum possible progress toward any goal from all the other distances. You can consider this second value a "meta-heuristic"; it is an optimistic estimate of the heuristic for other goals.
On subsequent steps, compute the heuristic for the current goal, and any goals with a "meta-heuristic" that is less than or equal to the heuristic. The other goals can't possibly have a better heuristic, so you don't need to compute them. The nearest goal becomes the new current goal; move toward it, subtracting the maximum possible progress from the others. Repeat until you arrive at a goal.
Use Dijkstra's algorithm, which has as its output the minimal cost to all reachable points. Then you just select the goal points from the output.
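A minimal sketch of that approach, under the assumption of an adjacency-list graph with non-negative int weights (not from the question): run Dijkstra once from the start, then read off the distance for each goal.

import java.util.Arrays;
import java.util.Comparator;
import java.util.List;
import java.util.PriorityQueue;

public class MultiGoalDijkstra
{
    // edges[v] is a list of {neighbor, weight} pairs; returns dist[] from source
    static int[] shortestFrom(List<int[]>[] edges, int source)
    {
        int[] dist = new int[edges.length];
        Arrays.fill(dist, Integer.MAX_VALUE);
        dist[source] = 0;
        PriorityQueue<int[]> pq = new PriorityQueue<>(Comparator.comparingInt(a -> a[1]));
        pq.add(new int[]{source, 0});
        while (!pq.isEmpty())
        {
            int[] top = pq.poll();
            int v = top[0], d = top[1];
            if (d > dist[v]) continue; // stale queue entry
            for (int[] e : edges[v])
            {
                int nd = d + e[1];
                if (nd < dist[e[0]])
                {
                    dist[e[0]] = nd;
                    pq.add(new int[]{e[0], nd});
                }
            }
        }
        return dist; // look up dist[g] for each goal g (or exit early once all goals are settled)
    }
}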
You may consider this article if you don't have too many goals and want a simple approach.
If you want to search for any of several goals, construct a heuristic
h'(x) that is the minimum of h1(x), h2(x), h3(x), ... where h1, h2, h3
are heuristics to each of the nearby spots.
One way to think about this is that we can add a new zero-cost edge
from each of the goals to a new graph node. A path to that new node
will necessarily go through one of the goal nodes.
If you want to search for paths to all of several goals, your best
option may be Dijkstra’s Algorithm with early exit when you find all
the goals. There may be a variant of A* that can calculate these
paths; I don’t know.
If you want to search for a spot near a single goal, ask A* search to
find a path to the center of the goal area. While processing nodes
from the OPEN set, exit when you pull a node that is near enough.
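A minimal sketch of the min-of-heuristics h'(x) described above, assuming Manhattan distance and goals given as (x, y) int pairs:

import java.util.List;

public class MultiGoalHeuristic
{
    // h'(p) = min over goals of Manhattan distance; admissible because each term is
    static int minManhattan(int x, int y, List<int[]> goals)
    {
        int best = Integer.MAX_VALUE;
        for (int[] g : goals) // linear scan; a k-d tree helps when there are many goals
        {
            best = Math.min(best, Math.abs(x - g[0]) + Math.abs(y - g[1]));
        }
        return best;
    }
}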
You can calculate the f score using the nearest target. As others said, for a naive approach you can directly calculate every target's distance from the current node and pick the minimum, if you only have a few targets to search. For more than 100 targets, you can probably find the nearest with a KDTree to speed up the process.
Here is sample code in Dart.
Iterable<Vector2> getPath(Vector2 from, Iterable<Vector2> targets,
{double? maxDistance, bool useAStar = false}) {
targets = targets.asSet();
clearPoints();
var projectedTargets = addPoints(targets).toSet();
var tree = useAStar ? IKDTree(targets) : null;
var q = PriorityQueue<Node>(_priorityQueueComparor);
Map<Vector2, Node> visited = {};
var node = Node(from);
visited[from] = node;
q.add(node);
while (q.isNotEmpty) {
var current = q.removeFirst();
// developer.log(
// '${current.point}#${current.distance}: ${getEdges(current.point).map((e) => e.dest)}');
for (var edge in getEdges(current.point)) {
if (visited.containsKey(edge.dest)) continue;
var distance = current.distance + edge.distance;
// too far
if (maxDistance != null && distance > maxDistance) continue;
// it is a target
if (projectedTargets.contains(edge.dest)) {
return reconstructPath(visited, current, edge.dest);
}
// we only interested in exploring polygon node.
if (!_polygonPoints.contains(edge.dest)) continue;
var f = 0.0;
if (tree != null) {
var nearest = tree
.nearest(edge.dest, maxDistance: maxDistance ?? double.infinity)
.firstOrNull;
f = nearest != null ? edge.dest.distanceToSquared(nearest) : 0.0;
}
node = Node(edge.dest, distance, current.count + 1, current.point, f);
visited[edge.dest] = node;
q.add(node);
}
}
return [];
}
Iterable<Vector2> reconstructPath(
Map<Vector2, Node> visited, Node prev, Vector2 point) {
var path = <Vector2>[point];
Node? currentNode = prev;
while (currentNode != null) {
path.add(currentNode.point);
currentNode = visited[currentNode.prev];
}
return path.reversed;
}
int _priorityQueueComparor(Node p0, Node p1) {
int r;
if (p0.f > 0 && p1.f > 0) {
r = ((p0.distance * p0.distance) + p0.f)
.compareTo((p1.distance * p1.distance) + p1.f);
if (r != 0) return r;
}
r = p0.distance.compareTo(p1.distance);
if (r != 0) return r;
return p0.count.compareTo(p1.count);
}
And here is the implementation of the KDTree:
class IKDTree {
final int _dimensions = 2;
late Node? _root;
IKDTree(Iterable<Vector2> points) {
_root = _buildTree(points, null);
}
Node? _buildTree(Iterable<Vector2> points, Node? parent) {
var list = points.asList();
if (list.isEmpty) return null;
var median = (list.length / 2).floor();
// Select the longest dimension as division axis
var axis = 0;
var aabb = AABB.fromPoints(list);
for (var i = 1; i < _dimensions; i++) {
if (aabb.range[i] > aabb.range[axis]) {
axis = i;
}
}
// Divide by the division axis and recursively build.
// var list = list.orderBy((e) => _selector(e)[axis]).asList();
list.sort(((a, b) => a[axis].compareTo(b[axis])));
var point = list[median];
var node = Node(point.clone());
node.parent = parent;
node.left = _buildTree(list.sublist(0, median), node);
node.right = _buildTree(list.sublist(median + 1), node);
update(node);
return node;
}
void addPoint(Vector2 point, [bool allowRebuild = true]) {
_root = _addByPoint(_root, point, allowRebuild, 0);
}
// void removePoint(Vector2 point, [bool allowRebuild = true]) {
// if (node == null) return;
// _removeNode(node, allowRebuild);
// }
Node? _addByPoint(
Node? node, Vector2 point, bool allowRebuild, int parentDim) {
if (node == null) {
node = Node(point.clone());
node.dimension = (parentDim + 1) % _dimensions;
update(node);
return node;
}
_pushDown(node);
if (point[node.dimension] < node.point[node.dimension]) {
node.left = _addByPoint(node.left, point, allowRebuild, node.dimension);
} else {
node.right = _addByPoint(node.right, point, allowRebuild, node.dimension);
}
update(node);
bool needRebuild = allowRebuild && criterionCheck(node);
if (needRebuild) node = rebuild(node);
return node;
}
// checked
void _pushDown(Node? node) {
if (node == null) return;
if (node.needPushDownToLeft && node.left != null) {
node.left!.treeDownsampleDeleted |= node.treeDownsampleDeleted;
node.left!.pointDownsampleDeleted |= node.treeDownsampleDeleted;
node.left!.treeDeleted =
node.treeDeleted || node.left!.treeDownsampleDeleted;
node.left!.deleted =
node.left!.treeDeleted || node.left!.pointDownsampleDeleted;
if (node.treeDownsampleDeleted) {
node.left!.downDeletedNum = node.left!.treeSize;
}
if (node.treeDeleted) {
node.left!.invalidNum = node.left!.treeSize;
} else {
node.left!.invalidNum = node.left!.downDeletedNum;
}
node.left!.needPushDownToLeft = true;
node.left!.needPushDownToRight = true;
node.needPushDownToLeft = false;
}
if (node.needPushDownToRight && node.right != null) {
node.right!.treeDownsampleDeleted |= node.treeDownsampleDeleted;
node.right!.pointDownsampleDeleted |= node.treeDownsampleDeleted;
node.right!.treeDeleted =
node.treeDeleted || node.right!.treeDownsampleDeleted;
node.right!.deleted =
node.right!.treeDeleted || node.right!.pointDownsampleDeleted;
if (node.treeDownsampleDeleted) {
node.right!.downDeletedNum = node.right!.treeSize;
}
if (node.treeDeleted) {
node.right!.invalidNum = node.right!.treeSize;
} else {
node.right!.invalidNum = node.right!.downDeletedNum;
}
node.right!.needPushDownToLeft = true;
node.right!.needPushDownToRight = true;
node.needPushDownToRight = false;
}
}
void _removeNode(Node? node, bool allowRebuild) {
if (node == null || node.deleted) return;
_pushDown(node);
node.deleted = true;
node.invalidNum++;
if (node.invalidNum == node.treeSize) {
node.treeDeleted = true;
}
// update and rebuild parent
var parent = node.parent;
if (parent != null) {
updateAncestors(parent);
bool needRebuild = allowRebuild && criterionCheck(parent);
if (needRebuild) parent = rebuild(parent);
}
}
void updateAncestors(Node? node) {
if (node == null) return;
update(node);
updateAncestors(node.parent);
}
void _removeByPoint(Node? node, Vector2 point, bool allowRebuild) {
if (node == null || node.treeDeleted) return;
_pushDown(node);
if (node.point == point && !node.deleted) {
node.deleted = true;
node.invalidNum++;
if (node.invalidNum == node.treeSize) {
node.treeDeleted = true;
}
return;
}
if (point[node.dimension] < node.point[node.dimension]) {
_removeByPoint(node.left, point, false);
} else {
_removeByPoint(node.right, point, false);
}
update(node);
bool needRebuild = allowRebuild && criterionCheck(node);
if (needRebuild) rebuild(node);
}
// checked
void update(Node node) {
var left = node.left;
var right = node.right;
node.treeSize = (left != null ? left.treeSize : 0) +
(right != null ? right.treeSize : 0) +
1;
node.invalidNum = (left != null ? left.invalidNum : 0) +
(right != null ? right.invalidNum : 0) +
(node.deleted ? 1 : 0);
node.downDeletedNum = (left != null ? left.downDeletedNum : 0) +
(right != null ? right.downDeletedNum : 0) +
(node.pointDownsampleDeleted ? 1 : 0);
node.treeDownsampleDeleted = (left == null || left.treeDownsampleDeleted) &&
(right == null || right.treeDownsampleDeleted) &&
node.pointDownsampleDeleted;
node.treeDeleted = (left == null || left.treeDeleted) &&
(right == null || right.treeDeleted) &&
node.deleted;
var minList = <Vector2>[];
var maxList = <Vector2>[];
if (left != null && !left.treeDeleted) {
minList.add(left.aabb.min);
maxList.add(left.aabb.max);
}
if (right != null && !right.treeDeleted) {
minList.add(right.aabb.min);
maxList.add(right.aabb.max);
}
if (!node.deleted) {
minList.add(node.point);
maxList.add(node.point);
}
if (minList.isNotEmpty && maxList.isNotEmpty) {
node.aabb = AABB()
..min = minList.min()
..max = maxList.max();
}
// TODO: Radius data for search: https://github.com/hku-mars/ikd-Tree/blob/main/ikd-Tree/ikd_Tree.cpp#L1312
if (left != null) left.parent = node;
if (right != null) right.parent = node;
// TODO: root alpha value for multithread
}
// checked
final minimalUnbalancedTreeSize = 10;
final deleteCriterionParam = 0.3;
final balanceCriterionParam = 0.6;
bool criterionCheck(Node node) {
if (node.treeSize <= minimalUnbalancedTreeSize) return false;
double balanceEvaluation = 0.0;
double deleteEvaluation = 0.0;
var child = node.left ?? node.right!;
deleteEvaluation = node.invalidNum / node.treeSize;
balanceEvaluation = child.treeSize / (node.treeSize - 1);
if (deleteEvaluation > deleteCriterionParam) return true;
if (balanceEvaluation > balanceCriterionParam ||
balanceEvaluation < 1 - balanceCriterionParam) return true;
return false;
}
void rebuildAll() {
_root = rebuild(_root);
}
// checked
Node? rebuild(Node? node) {
if (node == null) return null;
var parent = node.parent;
var points = flatten(node).toList();
// log('rebuilding: $node objects: ${objects.length}');
deleteTreeNodes(node);
return _buildTree(points, parent);
// if (parent == null) {
// _root = newNode;
// } else if (parent.left == node) {
// parent.left = newNode;
// } else if (parent.right == node) {
// parent.right = newNode;
// }
}
// checked
Iterable<Vector2> flatten(Node? node) sync* {
if (node == null) return;
_pushDown(node);
if (!node.deleted) yield node.point;
yield* flatten(node.left);
yield* flatten(node.right);
}
void deleteTreeNodes(Node? node) {
if (node == null) return;
_pushDown(node);
deleteTreeNodes(node.left);
deleteTreeNodes(node.right);
}
double _calcDist(Vector2 a, Vector2 b) {
double dist = 0;
for (var dim = 0; dim < _dimensions; dim++) {
dist += math.pow(a[dim] - b[dim], 2);
}
return dist;
}
// checked
double _calcBoxDist(Node? node, Vector2 point) {
if (node == null) return double.infinity;
double minDist = 0;
for (var dim = 0; dim < _dimensions; dim++) {
if (point[dim] < node.aabb.min[dim]) {
minDist += math.pow(point[dim] - node.aabb.min[dim], 2);
}
if (point[dim] > node.aabb.max[dim]) {
minDist += math.pow(point[dim] - node.aabb.max[dim], 2);
}
}
return minDist;
}
void _search(Node? node, int maxNodes, Vector2 point, BinaryHeap<Result> heap,
double maxDist) {
if (node == null || node.treeDeleted) return;
double curDist = _calcBoxDist(node, point);
double maxDistSqr = maxDist * maxDist;
if (curDist > maxDistSqr) return;
if (node.needPushDownToLeft || node.needPushDownToRight) {
_pushDown(node);
}
if (!node.deleted) {
double dist = _calcDist(point, node.point);
if (dist <= maxDistSqr &&
(heap.size() < maxNodes || dist < heap.peek().distance)) {
if (heap.size() >= maxNodes) heap.pop();
heap.push(Result(node, dist));
}
}
double distLeftNode = _calcBoxDist(node.left, point);
double distRightNode = _calcBoxDist(node.right, point);
if (heap.size() < maxNodes ||
distLeftNode < heap.peek().distance &&
distRightNode < heap.peek().distance) {
if (distLeftNode <= distRightNode) {
_search(node.left, maxNodes, point, heap, maxDist);
if (heap.size() < maxNodes || distRightNode < heap.peek().distance) {
_search(node.right, maxNodes, point, heap, maxDist);
}
} else {
_search(node.right, maxNodes, point, heap, maxDist);
if (heap.size() < maxNodes || distLeftNode < heap.peek().distance) {
_search(node.left, maxNodes, point, heap, maxDist);
}
}
} else {
if (distLeftNode < heap.peek().distance) {
_search(node.left, maxNodes, point, heap, maxDist);
}
if (distRightNode < heap.peek().distance) {
_search(node.right, maxNodes, point, heap, maxDist);
}
}
}
/// Find the [maxNodes] of nearest Nodes.
/// Distance is calculated via Metric function.
/// Max distance can be set with [maxDistance] param
Iterable<Vector2> nearest(Vector2 point,
{int maxNodes = 1, double maxDistance = double.infinity}) sync* {
var heap = BinaryHeap<Result>((e) => -e.distance);
_search(_root, maxNodes, point, heap, maxDistance);
var found = math.min(maxNodes, heap.content.length);
for (var i = 0; i < found; i++) {
yield heap.content[i].node.point;
}
}
int get length => _root?.length ?? 0;
int get height => _root?.height ?? 0;
}
class Result {
final Node node;
final double distance;
const Result(this.node, this.distance);
}
class Node {
Vector2 point;
int dimension = 0;
Node? parent;
Node? left;
Node? right;
int treeSize = 0;
int invalidNum = 0;
int downDeletedNum = 0;
bool deleted = false;
bool treeDeleted = false;
bool needPushDownToLeft = false;
bool needPushDownToRight = false;
bool treeDownsampleDeleted = false;
bool pointDownsampleDeleted = false;
AABB aabb = AABB();
Node(this.point);
int get length {
return 1 +
(left == null ? 0 : left!.length) +
(right == null ? 0 : right!.length);
}
int get height {
return 1 +
math.max(
left == null ? 0 : left!.height,
right == null ? 0 : right!.height,
);
}
int get depth {
return 1 + (parent == null ? 0 : parent!.depth);
}
}

What will be the time complexity of reversing the linked list in a different way using the code below?

Given a linked list $link1 with elements (a->b->c->d->e->f->g->h->i->j), we need to reverse the linked list such that the reversal is done in the following manner:
Reverse the 1st element (a)
Reverse the next 2 elements (a->c->b)
Reverse the next 3 elements (a->c->b->f->e->d)
Reverse the next 4 elements (a->c->b->f->e->d->j->i->h->g)
....
....
I have written the PHP code below to solve this problem.
Things I need:
I need to calculate the time complexity of the reverseLinkedList function below.
I need to know whether we can optimize the reverseLinkedList function to reduce its time complexity.
class ListNode
{
public $data;
public $next;
function __construct($data)
{
$this->data = $data;
$this->next = NULL;
}
function read_node()
{
return $this->data;
}
}
class LinkList
{
private $first_node;
private $last_node;
private $count;
function __construct()
{
$this->first_node = NULL;
$this->last_node = NULL;
$this->count = 0;
}
function size()
{
return $this->count;
}
public function read_list()
{
$listData = array();
$current = $this->first_node;
while($current != NULL)
{
echo $current->read_node().' ';
$current = $current->next;
}
}
public function reverse_list()
{
if(($this->first_node != NULL)&&($this->first_node->next != NULL))
{
$current = $this->first_node;
$new = NULL;
while ($current != NULL)
{
$temp = $current->next;
$current->next = $new;
$new = $current;
$current = $temp;
}
$this->first_node = $new;
}
}
public function read_node($position)
{
if($position <= $this->count)
{
$current = $this->first_node;
$pos = 1;
while($pos != $position)
{
if($current->next == NULL)
return null;
else
$current = $current->next;
$pos++;
}
return $current->data;
}
else
return NULL;
}
public function insert($data)
{
$new_node = new ListNode($data);
if($this->first_node != NULL)
{
$this->last_node->next = $new_node;
$new_node->next = NULL;
$this->last_node = &$new_node;
$this->count++;
}
else
{
$new_node->next = $this->first_node;
$this->first_node = &$new_node;
if($this->last_node == NULL)
$this->last_node = &$new_node;
$this->count++;
}
}
}
//Create linked list
$link1 = new LinkList();
//Insert elements
$link1->insert('a');
$link1->insert('b');
$link1->insert('c');
$link1->insert('d');
$link1->insert('e');
$link1->insert('f');
$link1->insert('g');
$link1->insert('h');
$link1->insert('i');
$link1->insert('j');
echo "<b>Input :</b><br>";
$link1->read_list();
//function to reverse linked list in specified manner
function reverseLinkedList(&$link1)
{
$size= $link1->size();
if($size>2)
{
$link2=new LinkList();
$link2->insert($link1->read_node(1));
$elements_covered=1;
//reverse
$rev_size=2;
while($elements_covered<$size)
{
$start=$elements_covered+1;
$temp_link = new LinkList();
$temp_link->insert($link1->read_node($start));
for($i=1;$i<$rev_size;$i++)
{
$temp_link->insert($link1->read_node(++$start));
}
$temp_link->reverse_list();
$temp_size=$temp_link->size();
$link2_size=$link2->size();
for($i=1;$i<=$temp_size;$i++)
{
$link2->insert($temp_link->read_node($i));
++$elements_covered;
++$link2_size;
}
++$rev_size;
}
///reverse
//Flip the linkedlist
$link1=$link2;
}
}
///function to reverse linked list in specified manner
//Reverse current linked list $link1
reverseLinkedList($link1);
echo "<br><br><b>Output :</b><br>";
$link1->read_list();
It's O(n): just one traversal. And secondly, tagging the question with a specific language is not necessary here.
I have provided pseudocode here for your reference:
prev = NULL;
current = *head_ref;
next = NULL;
while (current != NULL)
{
    next = current->next;
    current->next = prev;
    prev = current;
    current = next;
}
*head_ref = prev;
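Note that the reverseLinkedList function in the question repeatedly calls read_node, which walks from the first node each time, so its total work grows faster than linearly. The grouped reversal itself can be done in a single pass. Here is a minimal Java sketch (hypothetical Node class; it assumes a trailing partial group should also be reversed): every node is visited exactly once, so it runs in O(n) time with O(1) extra space.

class Node
{
    char data;
    Node next;
    Node(char data) { this.data = data; }
}

public class GrowingGroupReverse
{
    // reverse groups of size 1, 2, 3, ... in one pass
    static Node reverse(Node head)
    {
        Node newHead = null;  // head of the rebuilt list
        Node prevTail = null; // tail of the previous reversed group
        Node current = head;
        int groupSize = 1;
        while (current != null)
        {
            Node groupTail = current; // the group's first node becomes its tail
            Node prev = null;
            for (int i = 0; i < groupSize && current != null; i++)
            {
                Node next = current.next;
                current.next = prev;
                prev = current;
                current = next;
            }
            // prev is now the head of the reversed group
            if (newHead == null) newHead = prev;
            else prevTail.next = prev;
            prevTail = groupTail;
            groupSize++;
        }
        return newHead; // a->b->...->j becomes a->c->b->f->e->d->j->i->h->g
    }
}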

An exception of type 'System.OutOfMemoryException' occurred in itextsharp.dll but was not handled in user code

I am using iTextSharp to create a PDF. I have 100k records, but I am getting the following exception:
An exception of type 'System.OutOfMemoryException' occurred in
itextsharp.dll but was not handled in user code, at the line:
bodyTable.AddCell(currentProperty.GetValue(lst, null).ToString());
The code is:
var doc = new Document(pageSize);
PdfWriter.GetInstance(doc, stream);
doc.Open();
//Get exportable count
int columns = 0;
Type currentType = list[0].GetType();
//PREPARE HEADER
//foreach visible columns check if current object has proerpty
//else search in inner properties
foreach (var visibleColumn in visibleColumns)
{
if (currentType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key) != null)
{
columns++;
}
else
{
//check child property objects
var childProperties = currentType.GetProperties();
foreach (var prop in childProperties)
{
if (prop.PropertyType.BaseType == typeof(BaseEntity))
{
if (prop.PropertyType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key) != null)
{
columns++;
break;
}
}
}
}
}
//header
var headerTable = new PdfPTable(columns);
headerTable.WidthPercentage = 100f;
foreach (var visibleColumn in visibleColumns)
{
if (currentType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key) != null)
{
//headerTable.AddCell(prop.Name);
headerTable.AddCell(visibleColumn.Value);
}
else
{
//check child property objects
var childProperties = currentType.GetProperties();
foreach (var prop in childProperties)
{
if (prop.PropertyType.BaseType == typeof(BaseEntity))
{
if (prop.PropertyType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key) != null)
{
//headerTable.AddCell(prop.Name);
headerTable.AddCell(visibleColumn.Value);
break;
}
}
}
}
}
doc.Add(headerTable);
var bodyTable = new PdfPTable(columns);
bodyTable.Complete = false;
bodyTable.WidthPercentage = 100f;
//PREPARE DATA
foreach (var lst in list)
{
int col = 1;
foreach (var visibleColumn in visibleColumns)
{
var currentProperty = currentType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key);
if (currentProperty != null)
{
if (currentProperty.GetValue(lst, null) != null)
bodyTable.AddCell(currentProperty.GetValue(lst, null).ToString());
else
bodyTable.AddCell(string.Empty);
col++;
}
else
{
//check child property objects
var childProperties = currentType.GetProperties().Where(p => p.PropertyType.BaseType == typeof(BaseEntity));
foreach (var prop in childProperties)
{
currentProperty = prop.PropertyType.GetProperties().FirstOrDefault(p => p.Name == visibleColumn.Key);
if (currentProperty != null)
{
var currentPropertyObjectValue = prop.GetValue(lst, null);
if (currentPropertyObjectValue != null)
{
bodyTable.AddCell(currentProperty.GetValue(currentPropertyObjectValue, null).ToString());
}
else
{
bodyTable.AddCell(string.Empty);
}
break;
}
}
}
}
}
doc.Add(bodyTable);
doc.Close();
A back-of-the-envelope computation of the memory requirements, given the numbers you provided, gives 100000 * 40 * (2*20+4) ≈ 167 MB. That is well within your memory limit, but it is just a lower bound. I imagine each Cell object is pretty big: if each cell had a 512-byte overhead, you could well be looking at 2 GB taken. I reckon it might be even more, as PDF is a complex beast.
So you might realistically be looking at a situation where you are actually running out of memory; if not your machine's, then at least the amount .NET has set aside for the process.
I would do one thing first: check memory consumption like here. You might even do well to try with 10, 100, 1000, 10000, 100000 rows and see up to what number of rows the program works.
You could perhaps try a different approach altogether. If you're trying to print a nicely formatted table with a lot of data, you could output an HTML document instead, which can be written incrementally by just appending to a file, rather than using a third-party library. You can then "print" that HTML document to PDF. StackOverflow to the rescue again with this problem.
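One more thing worth checking: the code already sets bodyTable.Complete = false, which iText provides exactly for large tables, but that only helps if the table is handed to the document periodically so the buffered rows can be flushed to the output stream; adding it once at the end keeps all 100k rows in memory. A minimal sketch of the intended pattern, written against iText 5's Java API (iTextSharp mirrors these names closely; treat the details as an assumption to verify against your version):

import java.io.FileOutputStream;
import com.itextpdf.text.Document;
import com.itextpdf.text.pdf.PdfPTable;
import com.itextpdf.text.pdf.PdfWriter;

public class LargeTable
{
    public static void main(String[] args) throws Exception
    {
        Document doc = new Document();
        PdfWriter.getInstance(doc, new FileOutputStream("large.pdf"));
        doc.open();
        PdfPTable table = new PdfPTable(4);
        table.setComplete(false); // tell iText the table will arrive in parts
        for (int row = 0; row < 100000; row++)
        {
            for (int col = 0; col < 4; col++)
            {
                table.addCell("r" + row + "c" + col);
            }
            if (row % 1000 == 0)
            {
                doc.add(table); // flush buffered rows; the table keeps accepting cells
            }
        }
        table.setComplete(true); // mark the table finished
        doc.add(table);          // write the remaining rows
        doc.close();
    }
}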

FromOutcome for `Switch Node` directing flow to `Method Call Node` doesn't work

I have defined my flow as:
builder.id("", PublisherBean.PUBLISHER_FLOW_NAME);
builder.viewNode("list", "/pages/publishers.xhtml");
builder.viewNode("details", "/pages/publishers-details.xhtml");
builder.viewNode("deleted", "/pages/publishers-deleted.xhtml");
builder.viewNode("form", "/pages/publishers-form.xhtml");
builder.viewNode("exit", "/index.xhtml");
builder.methodCallNode("invoke-update")
.expression("#{publisherBean.update()}")
.defaultOutcome("details");
builder.methodCallNode("switch-fail")
.defaultOutcome("invoke-publishers")
.expression("#{publisherBean.switchFail()}");
builder.switchNode("proceed-action-request")
.defaultOutcome("switch-fail")
.switchCase()
.condition("#{publisherBean.actionType.ifEdit()}").fromOutcome("form");
builder.switchNode("go-for-it")
.defaultOutcome("switch-fail")
.switchCase()
.switchCase()
.condition("#{publisherBean.actionType.ifEdit()}").fromOutcome("invoke-update");
As you can see, there are two switch nodes. The first directs to a View Node; the second tries to direct to a Method Call Node.
The first one works fine, but the second is giving me a headache. It produces this error:
Unable to find matching navigation case with from-view-id '/pages/publishers-form.xhtml' for action '#{publisherBean.proceed()}' with outcome 'proceed-form'.
The proceed function is just:
public String proceed() {
    LOG.log(Level.OFF, "Form proceed in action type {0}", actionType);
    return "go-for-it";
}
The logged info confirms that publisherBean.actionType.ifEdit() returns true, but that fact is ignored. If I change the outcome from invoke-update to form or any other View Node id, then it "works fine".
Am I doing something wrong, or can a Method Call Node not be used as an outcome of a Switch Node?
I ran into this issue too. In my case the problem was calling a Method Call Node after another Method Call Node.
I investigated it a bit and found the problem in the com.sun.faces.application.NavigationHandlerImpl.synthesizeCaseStruct method. This method is used to determine where to go from a methodCallNode or switchNode, and it only looks at ViewNodes and ReturnNodes.
private CaseStruct synthesizeCaseStruct(FacesContext context, Flow flow, String fromAction, String outcome) {
CaseStruct result = null;
FlowNode node = flow.getNode(outcome);
if (null != node) {
if (node instanceof ViewNode) {
result = new CaseStruct();
result.viewId = ((ViewNode)node).getVdlDocumentId();
result.navCase = new MutableNavigationCase(fromAction,
fromAction, outcome, null, result.viewId,
flow.getDefiningDocumentId(), null, false, false);
} else if (node instanceof ReturnNode) {
String fromOutcome = ((ReturnNode)node).getFromOutcome(context);
FlowHandler flowHandler = context.getApplication().getFlowHandler();
try {
flowHandler.pushReturnMode(context);
result = getViewId(context, fromAction, fromOutcome, FlowHandler.NULL_FLOW);
// We are in a return node, but no result can be found from that
// node. Show the last displayed viewId from the preceding flow.
if (null == result) {
Flow precedingFlow = flowHandler.getCurrentFlow(context);
if (null != precedingFlow) {
String toViewId = flowHandler.getLastDisplayedViewId(context);
if (null != toViewId) {
result = new CaseStruct();
result.viewId = toViewId;
result.navCase = new MutableNavigationCase(context.getViewRoot().getViewId(),
fromAction,
outcome,
null,
toViewId,
FlowHandler.NULL_FLOW,
null,
false,
false);
}
}
} else {
result.newFlow = FlowImpl.SYNTHESIZED_RETURN_CASE_FLOW;
}
}
finally {
flowHandler.popReturnMode(context);
}
}
} else {
// See if there is an implicit match within this flow, using outcome
// to derive a view id within this flow.
String currentViewId = outcome;
// If the viewIdToTest needs an extension, take one from the currentViewId.
String currentExtension;
int idx = currentViewId.lastIndexOf('.');
if (idx != -1) {
currentExtension = currentViewId.substring(idx);
} else {
// PENDING, don't hard code XHTML here, look it up from configuration
currentExtension = ".xhtml";
}
String viewIdToTest = "/" + flow.getId() + "/" + outcome + currentExtension;
ViewHandler viewHandler = Util.getViewHandler(context);
viewIdToTest = viewHandler.deriveViewId(context, viewIdToTest);
if (null != viewIdToTest) {
result = new CaseStruct();
result.viewId = viewIdToTest;
result.navCase = new MutableNavigationCase(fromAction,
fromAction, outcome, null, result.viewId,
null, false, false);
}
}
return result;
}
