1 | #include <stdio.h> |
---|
2 | #include <stdlib.h> |
---|
3 | #include <ctype.h> |
---|
4 | #include <string.h> |
---|
5 | |
---|
6 | #include "adlocal.h" |
---|
7 | /*#include "arbdb.h"*/ |
---|
8 | #include "adlundo.h" |
---|
9 | |
---|
/* Scan the index-file list of container 'gbf'; afterwards 'ifs' points to the
 * index for key 'quark', or is NULL when that key has no index. */
#define GB_INDEX_FIND(gbf,ifs,quark)                                        \
    for (ifs = GBCONTAINER_IFS(gbf); ifs; ifs = GB_INDEX_FILES_NEXT(ifs)) { \
        if (ifs->key == quark) break;                                       \
    }
---|
14 | |
---|
15 | /* write field in index table */ |
---|
16 | char *gb_index_check_in(GBDATA *gbd) |
---|
17 | { |
---|
18 | struct gb_index_files_struct *ifs; |
---|
19 | |
---|
20 | GBQUARK quark; |
---|
21 | unsigned long index; |
---|
22 | GB_CSTR data; |
---|
23 | GBCONTAINER *gfather; |
---|
24 | |
---|
25 | gfather = GB_GRANDPA(gbd); |
---|
26 | if (!gfather) return 0; |
---|
27 | |
---|
28 | quark = GB_KEY_QUARK(gbd); |
---|
29 | GB_INDEX_FIND(gfather,ifs,quark); |
---|
30 | if (!ifs) return 0; /* This key is not indexed */ |
---|
31 | |
---|
32 | if (GB_TYPE(gbd) != GB_STRING && GB_TYPE(gbd) != GB_LINK) return 0; |
---|
33 | |
---|
34 | if (gbd->flags2.is_indexed) |
---|
35 | { |
---|
36 | GB_internal_error("Double checked in"); |
---|
37 | return 0; |
---|
38 | } |
---|
39 | |
---|
40 | data = GB_read_char_pntr(gbd); |
---|
41 | GB_CALC_HASH_INDEX(data,index,ifs->hash_table_size, ifs->case_sens); |
---|
42 | ifs->nr_of_elements++; |
---|
43 | { |
---|
44 | struct gb_if_entries *ifes; |
---|
45 | GB_REL_IFES *entries = GB_INDEX_FILES_ENTRIES(ifs); |
---|
46 | |
---|
47 | ifes = (struct gb_if_entries *)gbm_get_mem(sizeof(struct gb_if_entries), |
---|
48 | GB_GBM_INDEX(gbd)); |
---|
49 | |
---|
50 | SET_GB_IF_ENTRIES_NEXT(ifes,GB_ENTRIES_ENTRY(entries,index)); |
---|
51 | SET_GB_IF_ENTRIES_GBD(ifes,gbd); |
---|
52 | SET_GB_ENTRIES_ENTRY(entries,index,ifes); |
---|
53 | } |
---|
54 | gbd->flags2.tisa_index = 1; |
---|
55 | gbd->flags2.is_indexed = 1; |
---|
56 | return 0; |
---|
57 | } |
---|
58 | |
---|
59 | /* remove entry from index table */ |
---|
/* Remove 'gbd' from the hash index of its grandparent container.
 * No-op when the entry was never checked in. Errors are reported via
 * GB_internal_error (callers cannot handle them). */
void gb_index_check_out(GBDATA *gbd) {
    if (gbd->flags2.is_indexed) {
        GB_ERROR error = 0;
        GBCONTAINER *gfather = GB_GRANDPA(gbd);
        GBQUARK quark = GB_KEY_QUARK(gbd);

        struct gb_index_files_struct *ifs;

        gbd->flags2.is_indexed = 0; /* clear flag first - avoids re-entry */
        GB_INDEX_FIND(gfather, ifs, quark);

        if (!ifs) error = "key is not indexed";
        else {
            /* a transaction is needed to read the current value */
            error = GB_push_transaction(gbd);
            if (!error) {
                GB_CSTR data = GB_read_char_pntr(gbd);

                if (!data) {
                    error = GBS_global_string("can't read key value (%s)", GB_await_error());
                }
                else {
                    unsigned long index;
                    GB_CALC_HASH_INDEX(data, index, ifs->hash_table_size, ifs->case_sens);

                    struct gb_if_entries *ifes2 = 0; /* predecessor in collision chain */
                    GB_REL_IFES *entries = GB_INDEX_FILES_ENTRIES(ifs);
                    struct gb_if_entries *ifes;

                    /* walk the collision chain of the slot and unlink gbd's entry */
                    for (ifes = GB_ENTRIES_ENTRY(entries,index); ifes; ifes = GB_IF_ENTRIES_NEXT(ifes)) {
                        if (gbd == GB_IF_ENTRIES_GBD(ifes)) { /* entry found */
                            if (ifes2) SET_GB_IF_ENTRIES_NEXT(ifes2, GB_IF_ENTRIES_NEXT(ifes));
                            else SET_GB_ENTRIES_ENTRY(entries,index,GB_IF_ENTRIES_NEXT(ifes));

                            ifs->nr_of_elements--;
                            gbm_free_mem((char *)ifes, sizeof(struct gb_if_entries), GB_GBM_INDEX(gbd));
                            break;
                        }
                        ifes2 = ifes;
                    }
                }
            }
            error = GB_end_transaction(gbd, error);
        }

        if (error) {
            error = GBS_global_string("gb_index_check_out failed for key '%s' (%s)\n", GB_KEY(gbd), error);
            GB_internal_error(error);
        }
    }
}
---|
110 | |
---|
/* Create an index on 'key' for the children of container 'gbd'.
 * All existing GB_STRING/GB_LINK entries with that key (one container level
 * below the subcontainers of gbd) are checked in immediately.
 * Returns an error for non-containers and for DB clients. */
GB_ERROR GB_create_index(GBDATA *gbd, const char *key, GB_CASE case_sens, long estimated_size) {
    /* Create an index for a database.
     * Uses hash tables - collisions are avoided by using linked lists.
     */
    GB_ERROR error = 0;

    if (GB_TYPE(gbd) != GB_DB) {
        error = GB_export_error("GB_create_index used on non CONTAINER Type");
    }
    else if (GB_read_clients(gbd)<0) {
        /* clients must not maintain their own index tables */
        error = GB_export_error("No index tables in DB clients allowed");
    }
    else {
        GBCONTAINER *gbc = (GBCONTAINER *)gbd;
        GBQUARK key_quark = GB_key_2_quark(gbd,key);

        struct gb_index_files_struct *ifs;

        GB_INDEX_FIND(gbc,ifs,key_quark);

        if (!ifs) { /* if not already have index (e.g. if fast-loaded) */
            GBDATA *gbf;

            /* allocate new index and prepend it to the container's index list */
            ifs = (struct gb_index_files_struct *)GB_calloc(sizeof(struct gb_index_files_struct),1);
            SET_GB_INDEX_FILES_NEXT(ifs,GBCONTAINER_IFS(gbc));
            SET_GBCONTAINER_IFS(gbc,ifs);

            ifs->key = key_quark;
            ifs->hash_table_size = GBS_get_a_prime(estimated_size); /* prime table size improves distribution */
            ifs->nr_of_elements = 0;
            ifs->case_sens = case_sens;

            SET_GB_INDEX_FILES_ENTRIES(ifs, (struct gb_if_entries **)GB_calloc(sizeof(void *),(int)ifs->hash_table_size));

            /* check in all existing entries: iterate every subcontainer (-1 = any key),
             * then every child of it matching key_quark */
            for (gbf = GB_find_sub_by_quark(gbd,-1,0);
                 gbf;
                 gbf = GB_find_sub_by_quark(gbd,-1,gbf))
            {
                if (GB_TYPE(gbf) == GB_DB) {
                    GBDATA *gb2;

                    for (gb2 = GB_find_sub_by_quark(gbf,key_quark,0);
                         gb2;
                         gb2 = GB_find_sub_by_quark(gbf,key_quark,gb2))
                    {
                        if (GB_TYPE(gb2) != GB_STRING && GB_TYPE(gb2) != GB_LINK) continue;
                        gb_index_check_in(gb2);
                    }
                }
            }
        }
    }
    return error;
}
---|
165 | |
---|
166 | #if defined(DEBUG) |
---|
167 | |
---|
/* Debug helper: print all indices of container 'gbd' to stderr.
 * Pass 1 prints a summary line per index, pass 2 additionally dumps every
 * indexed value with its hash-slot number. */
NOT4PERL void GB_dump_indices(GBDATA *gbd) {
    // dump indices of container

    char *db_path = strdup(GB_get_db_path(gbd));

    if (GB_TYPE(gbd) != GB_DB) {
        fprintf(stderr, "'%s' (%s) is no container.\n", db_path, GB_get_type_name(gbd));
    }
    else {
        struct gb_index_files_struct *ifs;

        int index_count = 0;
        GBCONTAINER *gbc = (GBCONTAINER*)gbd;
        GB_MAIN_TYPE *Main = GBCONTAINER_MAIN(gbc);

        /* count indices first to detect the "no index" case */
        for (ifs = GBCONTAINER_IFS(gbc); ifs; ifs = GB_INDEX_FILES_NEXT(ifs)) {
            index_count++;
        }

        if (index_count == 0) {
            fprintf(stderr, "Container '%s' has no index.\n", db_path);
        }
        else {
            int pass;

            fprintf(stderr, "Indices for '%s':\n", db_path);
            for (pass = 1; pass <= 2; pass++) {
                if (pass == 2) {
                    fprintf(stderr, "\nDetailed index contents:\n\n");
                }
                index_count = 0;
                for (ifs = GBCONTAINER_IFS(gbc); ifs; ifs = GB_INDEX_FILES_NEXT(ifs)) {
                    fprintf(stderr,
                            "* Index %i for key=%s (%i), entries=%li, %s\n",
                            index_count,
                            Main->keys[ifs->key].key,
                            ifs->key,
                            ifs->nr_of_elements,
                            ifs->case_sens == GB_MIND_CASE
                            ? "Case sensitive"
                            : (ifs->case_sens == GB_IGNORE_CASE
                               ? "Case insensitive"
                               : "<Error in case_sens>")
                            );

                    if (pass == 2) {
                        struct gb_if_entries *ifes;
                        int index;

                        fprintf(stderr, "\n");
                        /* walk every hash slot and its collision chain */
                        for (index = 0; index<ifs->hash_table_size; index++) {
                            for (ifes = GB_ENTRIES_ENTRY(GB_INDEX_FILES_ENTRIES(ifs),index);
                                 ifes;
                                 ifes = GB_IF_ENTRIES_NEXT(ifes))
                            {
                                GBDATA *igbd = GB_IF_ENTRIES_GBD(ifes);
                                const char *data = GB_read_char_pntr(igbd); /* NOTE(review): may be NULL on read error - %s would then be UB; confirm */

                                fprintf(stderr, " - '%s' (@idx=%i)\n", data, index);
                            }
                        }
                        fprintf(stderr, "\n");
                    }
                    index_count++;
                }
            }
        }
    }

    free(db_path);
}
---|
239 | |
---|
240 | #endif /* DEBUG */ |
---|
241 | |
---|
242 | |
---|
/* find an entry in a hash table */
---|
/* Look up 'val' in the index of container 'gbf' for key 'quark'.
 * 'ifs' may be NULL (then the index is located via GB_INDEX_FIND).
 * Only entries whose father's index is >= 'after_index' are considered;
 * of those, the one with the smallest father index is returned
 * (i.e. the first match in container order at or after 'after_index').
 * Returns NULL when nothing matches or on index/case mismatch. */
GBDATA *gb_index_find(GBCONTAINER *gbf, struct gb_index_files_struct *ifs, GBQUARK quark, const char *val, GB_CASE case_sens, int after_index){
    unsigned long index;
    GB_CSTR data;
    struct gb_if_entries *ifes;
    GBDATA *result = 0;
    long min_index;

    if (!ifs) {
        GB_INDEX_FIND(gbf,ifs,quark);
        if (!ifs) {
            GB_internal_error("gb_index_find called, but no index table found");
            return 0;
        }
    }

    /* searching with a different case-mode than the index was built with
     * would silently miss entries */
    if (ifs->case_sens != case_sens) {
        GB_internal_error("case mismatch between index and search");
        return 0;
    }

    GB_CALC_HASH_INDEX(val, index, ifs->hash_table_size, ifs->case_sens);
    min_index = gbf->d.nheader; /* upper bound: one past the last child */

    for ( ifes = GB_ENTRIES_ENTRY(GB_INDEX_FILES_ENTRIES(ifs),index);
          ifes;
          ifes = GB_IF_ENTRIES_NEXT(ifes))
    {
        GBDATA *igbd = GB_IF_ENTRIES_GBD(ifes);
        GBCONTAINER *ifather = GB_FATHER(igbd);

        if ( ifather->index < after_index) continue;  /* before the start position */
        if ( ifather->index >= min_index) continue;   /* not better than current best */
        data = GB_read_char_pntr(igbd);
        if (GBS_string_matches(data, val, case_sens)) { /* entry found */
            result = igbd;
            min_index = ifather->index;
        }
    }
    return result;
}
---|
284 | |
---|
285 | |
---|
286 | /***************************************************************************************** |
---|
287 | UNDO functions |
---|
288 | ******************************************************************************************/ |
---|
289 | /* How they work: |
---|
290 | There are three undo stacks: |
---|
291 | GB_UNDO_NONE no undo |
---|
292 | GB_UNDO_UNDO normal undo stack |
---|
293 | GB_UNDO_REDO redo stack |
---|
294 | */ |
---|
295 | |
---|
296 | /***************************************************************************************** |
---|
297 | UNDO internal functions |
---|
298 | ******************************************************************************************/ |
---|
299 | |
---|
300 | char *gb_set_undo_type(GBDATA *gb_main, GB_UNDO_TYPE type){ |
---|
301 | GB_MAIN_TYPE *Main = GB_MAIN(gb_main); |
---|
302 | Main->undo_type = type; |
---|
303 | return 0; |
---|
304 | } |
---|
305 | |
---|
/** gb_init_undo_stack() (below) mallocs the main structures to control undo/redo */
---|
307 | |
---|
308 | void g_b_add_size_to_undo_entry(struct g_b_undo_entry_struct *ue, long size){ |
---|
309 | ue->sizeof_this += size; /* undo entry */ |
---|
310 | ue->father->sizeof_this += size; /* one undo */ |
---|
311 | ue->father->father->sizeof_this += size; /* all undos */ |
---|
312 | } |
---|
313 | |
---|
314 | struct g_b_undo_entry_struct *new_g_b_undo_entry_struct(struct g_b_undo_struct *u){ |
---|
315 | struct g_b_undo_entry_struct *ue = (struct g_b_undo_entry_struct *)gbm_get_mem( |
---|
316 | sizeof(struct g_b_undo_entry_struct), GBM_UNDO); |
---|
317 | ue->next = u->entries; |
---|
318 | ue->father = u; |
---|
319 | u->entries = ue; |
---|
320 | g_b_add_size_to_undo_entry(ue,sizeof(struct g_b_undo_entry_struct)); |
---|
321 | return ue; |
---|
322 | } |
---|
323 | |
---|
324 | |
---|
325 | |
---|
326 | void gb_init_undo_stack(struct gb_main_type *Main){ |
---|
327 | Main->undo = (struct g_b_undo_mgr_struct *)GB_calloc(sizeof(struct g_b_undo_mgr_struct),1); |
---|
328 | Main->undo->max_size_of_all_undos = GB_MAX_UNDO_SIZE; |
---|
329 | Main->undo->u = (struct g_b_undo_header_struct *) GB_calloc(sizeof(struct g_b_undo_header_struct),1); |
---|
330 | Main->undo->r = (struct g_b_undo_header_struct *) GB_calloc(sizeof(struct g_b_undo_header_struct),1); |
---|
331 | } |
---|
332 | |
---|
/* Free one undo entry. Modify-entries may still hold a reference to a
 * transaction save which has to be released first. */
void delete_g_b_undo_entry_struct(struct g_b_undo_entry_struct *entry){
    switch (entry->type) {
        case GB_UNDO_ENTRY_TYPE_MODIFY:
        case GB_UNDO_ENTRY_TYPE_MODIFY_ARRAY:
            {
                if (entry->d.ts) {
                    gb_del_ref_gb_transaction_save(entry->d.ts);
                }
            }
            /* fall-through */
        default:
            break;
    }
    gbm_free_mem((char *)entry,sizeof (struct g_b_undo_entry_struct),GBM_UNDO);
}
---|
347 | |
---|
348 | void delete_g_b_undo_struct(struct g_b_undo_struct *u){ |
---|
349 | struct g_b_undo_entry_struct *a,*next; |
---|
350 | for (a = u->entries; a; a = next){ |
---|
351 | next = a->next; |
---|
352 | delete_g_b_undo_entry_struct(a); |
---|
353 | } |
---|
354 | free((char *)u); |
---|
355 | } |
---|
356 | |
---|
357 | void delete_g_b_undo_header_struct(struct g_b_undo_header_struct *uh){ |
---|
358 | struct g_b_undo_struct *a,*next=0; |
---|
359 | for ( a= uh->stack; a; a = next){ |
---|
360 | next = a->next; |
---|
361 | delete_g_b_undo_struct(a); |
---|
362 | } |
---|
363 | free((char *)uh); |
---|
364 | } |
---|
365 | |
---|
366 | /******************** check size *****************************/ |
---|
367 | |
---|
/* Trim stack 'uhs': walk from newest to oldest accumulating byte size and
 * count; as soon as keeping the NEXT (older) undo would exceed 'size' bytes,
 * or 'max_cnt' undos are kept, delete all remaining older undos.
 * Always returns 0 (no error string is ever produced). */
char *g_b_check_undo_size2(struct g_b_undo_header_struct *uhs, long size, long max_cnt){
    long csize = 0;
    long ccnt = 0;
    struct g_b_undo_struct *us;

    /* 'us && us->next': the newest undo is always kept */
    for (us = uhs->stack; us && us->next ; us = us->next){
        csize += us->sizeof_this;
        ccnt ++;
        if ( ( (csize + us->next->sizeof_this) > size) ||
             (ccnt >= max_cnt ) ){ /* delete the rest */
            struct g_b_undo_struct *a,*next=0;

            for ( a = us->next; a; a = next){
                next = a->next;
                delete_g_b_undo_struct(a);
            }
            us->next = 0;
            uhs->sizeof_this = csize; /* resync total size after trimming */
            break;
        }
    }
    return 0;
}
---|
391 | |
---|
392 | char *g_b_check_undo_size(GB_MAIN_TYPE *Main){ |
---|
393 | char *error = 0; |
---|
394 | long maxsize = Main->undo->max_size_of_all_undos; |
---|
395 | error = g_b_check_undo_size2(Main->undo->u, maxsize/2,GB_MAX_UNDO_CNT); |
---|
396 | if (error) return error; |
---|
397 | error = g_b_check_undo_size2(Main->undo->r, maxsize/2,GB_MAX_REDO_CNT); |
---|
398 | if (error) return error; |
---|
399 | return 0; |
---|
400 | } |
---|
401 | |
---|
402 | |
---|
403 | void gb_free_undo_stack(struct gb_main_type *Main){ |
---|
404 | delete_g_b_undo_header_struct(Main->undo->u); |
---|
405 | delete_g_b_undo_header_struct(Main->undo->r); |
---|
406 | free((char *)Main->undo); |
---|
407 | } |
---|
408 | |
---|
409 | /***************************************************************************************** |
---|
410 | real undo (redo) |
---|
411 | ******************************************************************************************/ |
---|
412 | |
---|
/* Replay (revert) a single undo entry:
 *  - CREATED: delete the entry again
 *  - DELETED: rebuild the deleted entry/container and restore its flags
 *  - MODIFY(_ARRAY): restore the saved old data/flags from the transaction save
 * Returns an error string on failure. */
GB_ERROR g_b_undo_entry(GB_MAIN_TYPE *Main,struct g_b_undo_entry_struct *ue){
    GB_ERROR error = 0;
    Main = Main; /* self-assignment silences the unused-parameter warning */
    switch (ue->type) {
        case GB_UNDO_ENTRY_TYPE_CREATED:
            error = GB_delete(ue->source);
            break;
        case GB_UNDO_ENTRY_TYPE_DELETED:
            {
                GBDATA *gbd = ue->d.gs.gbd;
                int type = GB_TYPE(gbd);
                /* rebuild the deleted object below its old father (ue->source) */
                if (type == GB_DB) {
                    gbd = (GBDATA *)gb_make_pre_defined_container((GBCONTAINER *)ue->source,(GBCONTAINER *)gbd,-1, ue->d.gs.key);
                }else{
                    gbd = gb_make_pre_defined_entry((GBCONTAINER *)ue->source,gbd,-1, ue->d.gs.key);
                }
                GB_ARRAY_FLAGS(gbd).flags = ue->flag; /* restore saved header flags */
                gb_touch_header(GB_FATHER(gbd));
                gb_touch_entry((GBDATA *)gbd,gb_created);
            }
            break;
        case GB_UNDO_ENTRY_TYPE_MODIFY_ARRAY:
        case GB_UNDO_ENTRY_TYPE_MODIFY:
            {
                GBDATA *gbd = ue->source;
                int type = GB_TYPE(gbd);
                if (type == GB_DB) {
                    /* containers carry no data to restore - only flags (below) */
                }else{
                    gb_save_extern_data_in_ts(gbd); /* check out and free string */

                    if (ue->d.ts) { // nothing to undo (e.g. if undoing GB_touch)
                        gbd->flags = ue->d.ts->flags;
                        gbd->flags2.extern_data = ue->d.ts->flags2.extern_data;

                        memcpy(&gbd->info,&ue->d.ts->info,sizeof(gbd->info)); /* restore old information */
                        if (type >= GB_BITS) {
                            if (gbd->flags2.extern_data){
                                SET_GB_EXTERN_DATA_DATA(gbd->info.ex, ue->d.ts->info.ex.data); /* set relative pointers correctly */
                            }

                            gb_del_ref_and_extern_gb_transaction_save(ue->d.ts);
                            ue->d.ts = 0;

                            GB_INDEX_CHECK_IN(gbd); /* re-index the restored value */
                        }
                    }
                }
                {
                    /* restore the saved header flags (shared by container and data case) */
                    struct gb_header_flags *pflags = &GB_ARRAY_FLAGS(gbd);
                    if (pflags->flags != (unsigned)ue->flag){
                        GBCONTAINER *gb_father = GB_FATHER(gbd);
                        gbd->flags.saved_flags = pflags->flags;
                        pflags->flags = ue->flag;
                        if (GB_FATHER(gb_father)){
                            gb_touch_header(gb_father); /* dont touch father of main */
                        }
                    }
                }
                gb_touch_entry(gbd,gb_changed);
            }
            break;
        default:
            GB_internal_error("Undo stack corrupt:!!!");
            error = GB_export_error("shit 34345");
    }

    return error;
}
---|
482 | |
---|
483 | |
---|
484 | |
---|
/* Perform the topmost undo of stack 'uh' inside its own transaction.
 * Entries are replayed in stored (LIFO) order. The undo is removed from the
 * stack afterwards - even when replaying one of its entries failed. */
GB_ERROR g_b_undo(GB_MAIN_TYPE *Main, GBDATA *gb_main, struct g_b_undo_header_struct *uh){
    GB_ERROR error = 0;
    struct g_b_undo_struct *u;
    struct g_b_undo_entry_struct *ue,*next;
    if (!uh->stack) return GB_export_error("Sorry no more undos/redos available");

    GB_begin_transaction(gb_main);

    u=uh->stack;
    for (ue=u->entries; ue; ue = next) {
        next = ue->next;
        error = g_b_undo_entry(Main,ue);
        delete_g_b_undo_entry_struct(ue);
        u->entries = next; /* keep the list consistent in case of early break */
        if (error) break;
    }
    uh->sizeof_this -= u->sizeof_this; /* remove undo from list */
    uh->stack = u->next;
    delete_g_b_undo_struct(u);

    return GB_end_transaction(gb_main, error); /* aborts on error, commits otherwise */
}
---|
507 | |
---|
508 | GB_CSTR g_b_read_undo_key_pntr(GB_MAIN_TYPE *Main, struct g_b_undo_entry_struct *ue){ |
---|
509 | return Main->keys[ue->d.gs.key].key; |
---|
510 | } |
---|
511 | |
---|
512 | char *g_b_undo_info(GB_MAIN_TYPE *Main, GBDATA *gb_main, struct g_b_undo_header_struct *uh){ |
---|
513 | void *res = GBS_stropen(1024); |
---|
514 | struct g_b_undo_struct *u; |
---|
515 | struct g_b_undo_entry_struct *ue; |
---|
516 | GBUSE(Main);GBUSE(gb_main); |
---|
517 | u=uh->stack; |
---|
518 | if (!u) return strdup("No more undos available"); |
---|
519 | for (ue=u->entries; ue; ue = ue->next) { |
---|
520 | switch (ue->type) { |
---|
521 | case GB_UNDO_ENTRY_TYPE_CREATED: |
---|
522 | GBS_strcat(res,"Delete new entry: "); |
---|
523 | GBS_strcat(res,gb_read_key_pntr(ue->source)); |
---|
524 | break; |
---|
525 | case GB_UNDO_ENTRY_TYPE_DELETED: |
---|
526 | GBS_strcat(res,"Rebuild deleted entry: "); |
---|
527 | GBS_strcat(res,g_b_read_undo_key_pntr(Main,ue)); |
---|
528 | break; |
---|
529 | case GB_UNDO_ENTRY_TYPE_MODIFY_ARRAY: |
---|
530 | case GB_UNDO_ENTRY_TYPE_MODIFY: |
---|
531 | GBS_strcat(res,"Undo modified entry: "); |
---|
532 | GBS_strcat(res,gb_read_key_pntr(ue->source)); |
---|
533 | break; |
---|
534 | default: |
---|
535 | break; |
---|
536 | } |
---|
537 | GBS_chrcat(res,'\n'); |
---|
538 | } |
---|
539 | return GBS_strclose(res); |
---|
540 | } |
---|
541 | |
---|
542 | /***************************************************************************************** |
---|
543 | UNDO exported functions (to ARBDB) |
---|
544 | ******************************************************************************************/ |
---|
545 | |
---|
/** start a new undoable transaction: push a fresh (empty) undo onto the
 *  stack selected by the requested undo type and activate it via valid_u.
 *  Old undos may be trimmed first to respect the memory limit. */
char *gb_set_undo_sync(GBDATA *gb_main)
{
    GB_MAIN_TYPE *Main = GB_MAIN(gb_main);
    char *error = g_b_check_undo_size(Main);
    struct g_b_undo_header_struct *uhs;
    if (error) return error;
    switch (Main->requested_undo_type) { /* init the target undo stack */
        case GB_UNDO_UNDO:              /* that will undo but delete all redos */
            uhs = Main->undo->u;
            break;
        case GB_UNDO_UNDO_REDO: uhs = Main->undo->u; break;
        case GB_UNDO_REDO:      uhs = Main->undo->r; break;
        case GB_UNDO_KILL:      gb_free_all_undos(gb_main);
            /* fall-through: after killing all undos, nothing gets recorded */
        default:                uhs = 0;
    }
    if (uhs)
    {
        /* push a new empty undo and make it the active recording target */
        struct g_b_undo_struct *u = (struct g_b_undo_struct *) GB_calloc(sizeof(struct g_b_undo_struct) , 1);
        u->next = uhs->stack;
        u->father = uhs;
        uhs->stack = u;
        Main->undo->valid_u = u;
    }

    return gb_set_undo_type(gb_main,Main->requested_undo_type);
}
---|
573 | |
---|
574 | /* Remove all existing undos/redos */ |
---|
575 | char *gb_free_all_undos(GBDATA *gb_main){ |
---|
576 | GB_MAIN_TYPE *Main = GB_MAIN(gb_main); |
---|
577 | struct g_b_undo_struct *a,*next; |
---|
578 | for ( a= Main->undo->r->stack; a; a = next){ |
---|
579 | next = a->next; |
---|
580 | delete_g_b_undo_struct(a); |
---|
581 | } |
---|
582 | Main->undo->r->stack = 0; |
---|
583 | Main->undo->r->sizeof_this = 0; |
---|
584 | |
---|
585 | for ( a= Main->undo->u->stack; a; a = next){ |
---|
586 | next = a->next; |
---|
587 | delete_g_b_undo_struct(a); |
---|
588 | } |
---|
589 | Main->undo->u->stack = 0; |
---|
590 | Main->undo->u->sizeof_this = 0; |
---|
591 | return 0; |
---|
592 | } |
---|
593 | |
---|
594 | |
---|
595 | /* called to finish an undoable section, called at end of gb_commit_transaction */ |
---|
/* called to finish an undoable section, called at end of gb_commit_transaction */
/* Deactivate the currently recording undo. An empty undo (pure read
 * transaction) is dropped; a real undo in GB_UNDO_UNDO mode invalidates
 * all redos. Always switches undo mode back to GB_UNDO_NONE. */
char *gb_disable_undo(GBDATA *gb_main){
    GB_MAIN_TYPE *Main = GB_MAIN(gb_main);
    struct g_b_undo_struct *u = Main->undo->valid_u;
    if (!u) return 0;
    if (!u->entries){ /* nothing to undo, just a read transaction */
        u->father->stack = u->next;
        delete_g_b_undo_struct(u);
    }else{
        if (Main->requested_undo_type == GB_UNDO_UNDO) { /* remove all redos*/
            struct g_b_undo_struct *a,*next;
            for ( a= Main->undo->r->stack; a; a = next){
                next = a->next;
                delete_g_b_undo_struct(a);
            }
            Main->undo->r->stack = 0;
            Main->undo->r->sizeof_this = 0;
        }
    }
    Main->undo->valid_u = 0; /* stop recording */
    return gb_set_undo_type(gb_main,GB_UNDO_NONE);
}
---|
617 | |
---|
618 | void gb_check_in_undo_create(GB_MAIN_TYPE *Main,GBDATA *gbd){ |
---|
619 | struct g_b_undo_entry_struct *ue; |
---|
620 | if (!Main->undo->valid_u) return; |
---|
621 | ue = new_g_b_undo_entry_struct(Main->undo->valid_u); |
---|
622 | ue->type = GB_UNDO_ENTRY_TYPE_CREATED; |
---|
623 | ue->source = gbd; |
---|
624 | ue->gbm_index = GB_GBM_INDEX(gbd); |
---|
625 | ue->flag = 0; |
---|
626 | } |
---|
627 | |
---|
/* Record a modification of 'gbd' on the active undo: keep a reference to
 * the old transaction save so the previous value can be restored.
 * Without an active undo the old data is freed immediately. */
void gb_check_in_undo_modify(GB_MAIN_TYPE *Main,GBDATA *gbd){
    long type = GB_TYPE(gbd);
    struct g_b_undo_entry_struct *ue;
    struct gb_transaction_save *old;

    if (!Main->undo->valid_u){
        GB_FREE_TRANSACTION_SAVE(gbd); /* not recording -> old data not needed */
        return;
    }

    old = GB_GET_EXT_OLD_DATA(gbd);
    ue = new_g_b_undo_entry_struct(Main->undo->valid_u);
    ue->source = gbd;
    ue->gbm_index = GB_GBM_INDEX(gbd);
    ue->type = GB_UNDO_ENTRY_TYPE_MODIFY;
    ue->flag = gbd->flags.saved_flags;

    if (type != GB_DB) {       /* containers carry no data of their own */
        ue->d.ts = old;
        if (old) {
            gb_add_ref_gb_transaction_save(old);
            if (type >= GB_BITS && old->flags2.extern_data && old->info.ex.data ) {
                ue->type = GB_UNDO_ENTRY_TYPE_MODIFY_ARRAY;
                /* move external array from ts to undo entry struct */
                g_b_add_size_to_undo_entry(ue,old->info.ex.memsize);
            }
        }
    }
}
---|
657 | |
---|
658 | #if defined(DEVEL_RALF) |
---|
659 | #warning change param for gb_check_in_undo_delete to GBDATA ** |
---|
660 | #endif /* DEVEL_RALF */ |
---|
661 | |
---|
/* Record the deletion of 'gbd' (and, for containers, all children
 * recursively) on the active undo, then remove the entry from the DB.
 * Without an active undo the entry is deleted directly.
 * 'deep' is the recursion depth (0 at the top-level call). */
void gb_check_in_undo_delete(GB_MAIN_TYPE *Main,GBDATA *gbd, int deep){
    long type = GB_TYPE(gbd);
    struct g_b_undo_entry_struct *ue;
    if (!Main->undo->valid_u){
        gb_delete_entry(&gbd); /* not recording -> plain delete */
        return;
    }

    if (type == GB_DB) {
        int index;
        GBDATA *gbd2;
        GBCONTAINER *gbc = ((GBCONTAINER *) gbd);

        /* record deletion of all children first (depth-first) */
        for (index = 0; (index < gbc->d.nheader); index++) {
            if (( gbd2 = GBCONTAINER_ELEM(gbc,index) )) {
                gb_check_in_undo_delete(Main,gbd2,deep+1);
            }
        };
    }else{
        GB_INDEX_CHECK_OUT(gbd);
        gbd->flags2.tisa_index = 0; /* never check in again */
    }
    gb_abort_entry(gbd); /* get old version */

    ue = new_g_b_undo_entry_struct(Main->undo->valid_u);
    ue->type = GB_UNDO_ENTRY_TYPE_DELETED;
    ue->source = (GBDATA *)GB_FATHER(gbd); /* remember where to rebuild it */
    ue->gbm_index = GB_GBM_INDEX(gbd);
    ue->flag = GB_ARRAY_FLAGS(gbd).flags;

    ue->d.gs.gbd = gbd;
    ue->d.gs.key = GB_KEY_QUARK(gbd);

    gb_pre_delete_entry(gbd); /* get the core of the entry */

    /* account the memory now owned by the undo entry */
    if (type == GB_DB) {
        g_b_add_size_to_undo_entry(ue,sizeof(GBCONTAINER));
    }else{
        if (type >= GB_BITS && gbd->flags2.extern_data) {
            /* we have copied the data structures, now
               mark the old as deleted !!! */
            g_b_add_size_to_undo_entry(ue,GB_GETMEMSIZE(gbd));
        }
        g_b_add_size_to_undo_entry(ue,sizeof(GBDATA));
    }
}
---|
708 | |
---|
709 | /***************************************************************************************** |
---|
710 | UNDO exported functions (to USER) |
---|
711 | ******************************************************************************************/ |
---|
712 | |
---|
713 | |
---|
/** define how to undo the next items; this function should be called just before opening
    a transaction, otherwise its effect will be delayed.
    possible types are:   GB_UNDO_UNDO    enable undo
 *                        GB_UNDO_NONE    disable undo
 *                        GB_UNDO_KILL    disable undo and remove old undos !!
 */
---|
720 | GB_ERROR GB_request_undo_type(GBDATA *gb_main, GB_UNDO_TYPE type){ |
---|
721 | GB_MAIN_TYPE *Main = GB_MAIN(gb_main); |
---|
722 | Main->requested_undo_type = type; |
---|
723 | if (!Main->local_mode) { |
---|
724 | if (type == GB_UNDO_NONE || type == GB_UNDO_KILL) { |
---|
725 | return gbcmc_send_undo_commands(gb_main,_GBCMC_UNDOCOM_REQUEST_NOUNDO); |
---|
726 | }else{ |
---|
727 | return gbcmc_send_undo_commands(gb_main,_GBCMC_UNDOCOM_REQUEST_UNDO); |
---|
728 | } |
---|
729 | } |
---|
730 | return 0; |
---|
731 | } |
---|
732 | |
---|
733 | GB_UNDO_TYPE GB_get_requested_undo_type(GBDATA *gb_main){ |
---|
734 | GB_MAIN_TYPE *Main = GB_MAIN(gb_main); |
---|
735 | return Main->requested_undo_type; |
---|
736 | } |
---|
737 | |
---|
738 | |
---|
/** undo/redo the last transaction */
/* In client mode the command is forwarded to the server. Locally the
 * requested undo type is temporarily switched so that replaying an undo
 * records onto the redo stack (and vice versa), then restored. */
GB_ERROR GB_undo(GBDATA *gb_main,GB_UNDO_TYPE type) {
    GB_MAIN_TYPE *Main = GB_MAIN(gb_main);
    GB_UNDO_TYPE old_type = GB_get_requested_undo_type(gb_main);
    GB_ERROR error = 0;
    if (!Main->local_mode) {
        switch (type) {
            case GB_UNDO_UNDO:
                return gbcmc_send_undo_commands(gb_main,_GBCMC_UNDOCOM_UNDO);
            case GB_UNDO_REDO:
                return gbcmc_send_undo_commands(gb_main,_GBCMC_UNDOCOM_REDO);
            default: GB_internal_error("unknown undo type in GB_undo");
                return GB_export_error("Internal UNDO error");
        }
    }
    switch (type){
        case GB_UNDO_UNDO:
            /* record the reverted changes onto the redo stack */
            GB_request_undo_type(gb_main,GB_UNDO_REDO);
            error = g_b_undo(Main,gb_main,Main->undo->u);
            GB_request_undo_type(gb_main,old_type);
            break;
        case GB_UNDO_REDO:
            /* record the replayed changes onto the undo stack (keeping redos) */
            GB_request_undo_type(gb_main,GB_UNDO_UNDO_REDO);
            error = g_b_undo(Main,gb_main,Main->undo->r);
            GB_request_undo_type(gb_main,old_type);
            break;
        default:
            error = GB_export_error("GB_undo: unknown undo type specified");
    }
    return error;
}
---|
770 | |
---|
771 | |
---|
/** get some information about the next undo */
/* Returns a heap-allocated description (caller frees) or NULL on error.
 * In client mode the info is fetched from the server. */
char *GB_undo_info(GBDATA *gb_main,GB_UNDO_TYPE type) {
    GB_MAIN_TYPE *Main = GB_MAIN(gb_main);
    if (!Main->local_mode) {
        switch (type) {
            case GB_UNDO_UNDO:
                return gbcmc_send_undo_info_commands(gb_main,_GBCMC_UNDOCOM_INFO_UNDO);
            case GB_UNDO_REDO:
                return gbcmc_send_undo_info_commands(gb_main,_GBCMC_UNDOCOM_INFO_REDO);
            default:
                GB_internal_error("unknown undo type in GB_undo");
                GB_export_error("Internal UNDO error");
                return 0;
        }
    }
    switch (type){
        case GB_UNDO_UNDO:
            return g_b_undo_info(Main,gb_main,Main->undo->u);
        case GB_UNDO_REDO:
            return g_b_undo_info(Main,gb_main,Main->undo->r);
        default:
            GB_export_error("GB_undo_info: unknown undo type specified");
            return 0;
    }
}
---|
797 | |
---|
/** set the maximum memory used for undoing */
/* Values below _GBCMC_UNDOCOM_SET_MEM are rejected: the memsize doubles as
 * a protocol command code when forwarded to the server, so small values are
 * reserved for the command enum. Existing undos are trimmed immediately. */
GB_ERROR GB_set_undo_mem(GBDATA *gbd, long memsize){
    GB_MAIN_TYPE *Main = GB_MAIN(gbd);
    if (memsize < _GBCMC_UNDOCOM_SET_MEM){
        return GB_export_errorf("Not enough UNDO memory specified: should be more than %i",
                                _GBCMC_UNDOCOM_SET_MEM);
    }
    Main->undo->max_size_of_all_undos = memsize;
    if (!Main->local_mode) {
        /* client: forward the new limit to the server (memsize acts as command) */
        return gbcmc_send_undo_commands(gbd,(enum gb_undo_commands)memsize);
    }
    g_b_check_undo_size(Main); /* trim stacks to the new limit right away */
    return 0;
}
---|
812 | |
---|