27 """
28 This module provides an implementation of the Table element that uses a
29 database engine for storage. On top of that it then re-implements a number
30 of the tables from the lsctables module to provide versions of their
31 methods that work against the SQL database.
32 """
33
34
35 import itertools
36 import operator
37 import os
38 import re
39 import shutil
40 import signal
41 import sys
42 import tempfile
43 import threading
44 from xml.sax.xmlreader import AttributesImpl
45 import warnings
46
47
48 from glue import git_version
49 from glue import offsetvector
50 from glue import segments
51 from . import ilwd
52 from . import ligolw
53 from . import table
54 from . import lsctables
55 from . import types as ligolwtypes
56 import six
57
58
59 __author__ = "Kipp Cannon <kipp.cannon@ligo.org>"
60 __version__ = "git id %s" % git_version.id
61 __date__ = git_version.date
62
63
64
65
66
67
68
69
70
71
72
74 """
75 A totally broken attempt to determine what type of database a
76 connection object is attached to. Don't use this.
77
78 The input is a DB API 2.0 compliant connection object, the return
79 value is one of the strings "sqlite3" or "mysql". Raises TypeError
80 when the database type cannot be determined.
81 """
82 if "sqlite" in repr(connection):
83 return "sqlite"
84 if "mysql" in repr(connection):
85 return "mysql"
86 raise TypeError(connection)


# map of scratch filenames to their tempfile.NamedTemporaryFile objects;
# holding these references keeps the files from being deleted while in use
temporary_files = {}
temporary_files_lock = threading.Lock()


# original signal handlers, saved by install_signal_trap() so that
# uninstall_signal_trap() can restore them
origactions = {}
111 """
112 Installs a signal handler to erase temporary scratch files when a
113 signal is received. This can be used to help ensure scratch files
114 are erased when jobs are evicted by Condor. signums is a squence
115 of the signals to trap, the default value is a list of the signals
116 used by Condor to kill and/or evict jobs.
117
118 The logic is as follows. If the current signal handler is
119 signal.SIG_IGN, i.e. the signal is being ignored, then the signal
120 handler is not modified since the reception of that signal would
121 not normally cause a scratch file to be leaked. Otherwise a signal
122 handler is installed that erases the scratch files. If the
123 original signal handler was a Python callable, then after the
124 scratch files are erased the original signal handler will be
125 invoked. If program control returns from that handler, i.e. that
126 handler does not cause the interpreter to exit, then sys.exit() is
127 invoked and retval is returned to the shell as the exit code.
128
129 Note: by invoking sys.exit(), the signal handler causes the Python
130 interpreter to do a normal shutdown. That means it invokes
131 atexit() handlers, and does other garbage collection tasks that it
132 normally would not do when killed by a signal.
133
134 Note: this function will not replace a signal handler more than
135 once, that is if it has already been used to set a handler
136 on a signal then it will be a no-op when called again for that
137 signal until uninstall_signal_trap() is used to remove the handler
138 from that signal.
139
140 Note: this function is called by get_connection_filename()
141 whenever it creates a scratch file.
142 """
143
144
145 signums = set(signums) - set(origactions)
146
147 def temporary_file_cleanup_on_signal(signum, frame):
148 with temporary_files_lock:
149 temporary_files.clear()
150 if callable(origactions[signum]):
151
152 return origactions[signum](signum, frame)
153
154
155 sys.exit(retval)
156
157 for signum in signums:
158 origactions[signum] = signal.getsignal(signum)
159 if origactions[signum] != signal.SIG_IGN:
160
161
162 signal.signal(signum, temporary_file_cleanup_on_signal)
163

def uninstall_signal_trap(signums = None):
	"""
	Undo the effects of install_signal_trap(). Restores the original
	signal handlers. If signums is a sequence of signal numbers only
	the signal handlers for those signals will be restored (KeyError
	will be raised if one of them is not one that install_signal_trap()
	installed a handler for, in which case some undefined number of
	handlers will have been restored). If signums is None (the
	default) then all signals that have been modified by previous calls
	to install_signal_trap() are restored.

	Note: this function is called by put_connection_filename() and
	discard_connection_filename() whenever they remove a scratch file
	and there are then no more scratch files in use.
	"""
	if signums is None:
		signums = list(origactions.keys())
	for signum in signums:
		signal.signal(signum, origactions.pop(signum))
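
# A minimal usage sketch (illustrative; get_connection_filename() installs
# these traps automatically when it creates a scratch file):
#
#	install_signal_trap()
#	try:
#		# ... create and use scratch files ...
#		pass
#	finally:
#		uninstall_signal_trap()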


def get_connection_filename(filename, tmp_path = None, replace_file = False, verbose = False):
	"""
	Utility code for moving database files to a (presumably local)
	working location for improved performance and reduced fileserver
	load.
	"""
	def mktmp(path, suffix = ".sqlite", verbose = False):
		with temporary_files_lock:
			# make sure the scratch file will be erased if
			# the job is evicted
			install_signal_trap()
			# create the scratch file and replace its
			# unlink() function
			temporary_file = tempfile.NamedTemporaryFile(suffix = suffix, dir = path if path != "_CONDOR_SCRATCH_DIR" else os.getenv("_CONDOR_SCRATCH_DIR"))
			def new_unlink(self, orig_unlink = temporary_file.unlink):
				# also remove a "-journal" partner, ignore all errors
				try:
					orig_unlink("%s-journal" % self)
				except:
					pass
				orig_unlink(self)
			temporary_file.unlink = new_unlink
			filename = temporary_file.name
			# hold a reference so the file is not deleted while in use
			temporary_files[filename] = temporary_file
		if verbose:
			sys.stderr.write("using '%s' as workspace\n" % filename)
		# NamedTemporaryFile() ignores umask and creates the file
		# accessible only by the owner; we should respect umask.
		# note that os.umask() sets it, too, so set it back.
		umsk = os.umask(0o777)
		os.umask(umsk)
		os.chmod(filename, 0o666 & ~umsk)
		return filename

	def truncate(filename, verbose = False):
		if verbose:
			sys.stderr.write("'%s' exists, truncating ... " % filename)
		try:
			fd = os.open(filename, os.O_WRONLY | os.O_TRUNC)
		except Exception as e:
			if verbose:
				sys.stderr.write("cannot truncate '%s': %s\n" % (filename, str(e)))
			return
		os.close(fd)
		if verbose:
			sys.stderr.write("done.\n")

	def cpy(srcname, dstname, verbose = False):
		if verbose:
			sys.stderr.write("copying '%s' to '%s' ... " % (srcname, dstname))
		shutil.copy2(srcname, dstname)
		if verbose:
			sys.stderr.write("done.\n")
		try:
			# try to preserve the permission bits; copy2()
			# is documented to preserve them, but is observed
			# to not always do so
			shutil.copystat(srcname, dstname)
		except Exception as e:
			if verbose:
				sys.stderr.write("warning: ignoring failure to copy permission bits from '%s' to '%s': %s\n" % (srcname, dstname, str(e)))

	database_exists = os.access(filename, os.F_OK)

	if tmp_path is not None:
		# can't use splitext() for the suffix because it only
		# keeps the last part of a multi-part suffix
		target = mktmp(tmp_path, suffix = ".".join(os.path.split(filename)[-1].split(".")[1:]), verbose = verbose)
		if database_exists:
			if replace_file:
				# truncate the database so that if this
				# job fails the user won't think the
				# database file is valid
				truncate(filename, verbose = verbose)
			else:
				# need to copy existing database to work
				# space for modifications
				i = 1
				while True:
					try:
						cpy(filename, target, verbose = verbose)
					except IOError as e:
						import errno
						import time
						if e.errno not in (errno.EPERM, errno.ENOSPC):
							# anything other
							# than permissions
							# or out-of-space
							# is a real error
							raise
						if i < 5:
							if verbose:
								sys.stderr.write("warning: attempt %d: %s, sleeping and trying again ...\n" % (i, errno.errorcode[e.errno]))
							time.sleep(10)
							i += 1
							continue
						if verbose:
							sys.stderr.write("warning: attempt %d: %s: working with original file '%s'\n" % (i, errno.errorcode[e.errno], filename))
						with temporary_files_lock:
							del temporary_files[target]
						target = filename
					break
	else:
		with temporary_files_lock:
			if filename in temporary_files:
				raise ValueError("file '%s' appears to be in use already as a temporary database file and is to be deleted" % filename)
		target = filename
		if database_exists and replace_file:
			truncate(target, verbose = verbose)

	del mktmp
	del truncate
	del cpy

	return target
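
# A minimal usage sketch: stage a database to scratch space, work on it,
# then restore it. The filename and tmp_path below are illustrative.
#
#	import sqlite3
#	working_filename = get_connection_filename("events.sqlite", tmp_path = "/tmp", verbose = True)
#	connection = sqlite3.connect(working_filename)
#	# ... queries and updates ...
#	connection.close()
#	put_connection_filename("events.sqlite", working_filename, verbose = True)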


def set_temp_store_directory(connection, temp_store_directory, verbose = False):
	"""
	Sets the temp_store_directory parameter in sqlite.
	"""
	if temp_store_directory == "_CONDOR_SCRATCH_DIR":
		temp_store_directory = os.getenv("_CONDOR_SCRATCH_DIR")
	if verbose:
		sys.stderr.write("setting the temp_store_directory to %s ... " % temp_store_directory)
	cursor = connection.cursor()
	cursor.execute("PRAGMA temp_store_directory = '%s'" % temp_store_directory)
	cursor.close()
	if verbose:
		sys.stderr.write("done\n")


def put_connection_filename(filename, working_filename, verbose = False):
	"""
	This function reverses the effect of a previous call to
	get_connection_filename(), restoring the working copy to its
	original location if the two are different. This function should
	always be called after calling get_connection_filename() when the
	file is no longer in use.

	During the move operation, this function traps the signals used by
	Condor to evict jobs. This reduces the risk of corrupting a
	document by the job terminating part-way through the restoration of
	the file to its original location. When the move operation is
	concluded, the original signal handlers are restored and if any
	signals were trapped they are resent to the current process in
	order. Typically this will result in the signal handlers installed
	by the install_signal_trap() function being invoked, meaning any
	other scratch files that might be in use get deleted and the
	current process is terminated.
	"""
	if working_filename != filename:
		# initialize the SIGTERM and SIGTSTP trap
		deferred_signals = []
		def newsigterm(signum, frame):
			deferred_signals.append(signum)
		oldhandlers = {}
		for sig in (signal.SIGTERM, signal.SIGTSTP):
			oldhandlers[sig] = signal.getsignal(sig)
			signal.signal(sig, newsigterm)

		# move the database
		if verbose:
			sys.stderr.write("moving '%s' to '%s' ... " % (working_filename, filename))
		shutil.move(working_filename, filename)
		if verbose:
			sys.stderr.write("done.\n")

		# the database file has been moved, so the reference to
		# the tempfile.NamedTemporaryFile object must be removed;
		# deleting the reference makes the object try to unlink
		# its file, so create a stand-in file for it to delete
		try:
			open(working_filename, "w").close()
		except:
			pass
		with temporary_files_lock:
			del temporary_files[working_filename]

		# restore original signal handlers, and send ourselves
		# any trapped signals in order
		for sig, oldhandler in six.iteritems(oldhandlers):
			signal.signal(sig, oldhandler)
		while deferred_signals:
			os.kill(os.getpid(), deferred_signals.pop(0))

	# if there are no more temporary files in place, remove the
	# scratch-file signal traps
	with temporary_files_lock:
		if not temporary_files:
			uninstall_signal_trap()


def discard_connection_filename(filename, working_filename, verbose = False):
	"""
	Like put_connection_filename(), but the working copy is simply
	deleted instead of being copied back to its original location.
	This is a useful performance boost if it is known that no
	modifications were made to the file, for example if queries were
	performed but no updates.

	Note that the file is not deleted if the working copy and original
	file are the same, so it is always safe to call this function after
	a call to get_connection_filename() even if a separate working copy
	is not created.
	"""
	if working_filename == filename:
		return
	with temporary_files_lock:
		if verbose:
			sys.stderr.write("removing '%s' ... " % working_filename)
		# delete the file by deleting the reference to the
		# tempfile.NamedTemporaryFile object
		del temporary_files[working_filename]
		if verbose:
			sys.stderr.write("done.\n")
		# if there are no more temporary files in place, remove
		# the scratch-file signal traps
		if not temporary_files:
			uninstall_signal_trap()
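
# A minimal usage sketch for read-only access: the scratch copy is simply
# discarded when done. The filename is illustrative.
#
#	import sqlite3
#	working_filename = get_connection_filename("events.sqlite", tmp_path = "/tmp")
#	connection = sqlite3.connect(working_filename)
#	# ... queries only, no updates ...
#	connection.close()
#	discard_connection_filename("events.sqlite", working_filename)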


def idmap_create(connection):
	"""
	Create the _idmap_ table. This table has columns "old" and "new"
	containing text strings mapping old IDs to new IDs. The old column
	is a primary key (is indexed and must contain unique entries). The
	table is created as a temporary table, so it will be automatically
	dropped when the database connection is closed.

	This function is for internal use, it forms part of the code used
	to re-map row IDs when merging multiple documents.
	"""
	connection.cursor().execute("CREATE TEMPORARY TABLE _idmap_ (old TEXT PRIMARY KEY NOT NULL, new TEXT NOT NULL)")


def idmap_reset(connection):
	"""
	Erase the contents of the _idmap_ table, but leave the table in
	place.

	This function is for internal use, it forms part of the code used
	to re-map row IDs when merging multiple documents.
	"""
	connection.cursor().execute("DELETE FROM _idmap_")


def idmap_get_new(connection, old, tbl):
	"""
	From the old ID string, obtain a replacement ID string by either
	grabbing it from the _idmap_ table if one has already been assigned
	to the old ID, or by using the current value of the Table
	instance's next_id class attribute. In the latter case, the new ID
	is recorded in the _idmap_ table, and the class attribute
	incremented by 1.

	This function is for internal use, it forms part of the code used
	to re-map row IDs when merging multiple documents.
	"""
	cursor = connection.cursor()
	cursor.execute("SELECT new FROM _idmap_ WHERE old == ?", (old,))
	new = cursor.fetchone()
	if new is not None:
		# a new ID has already been assigned to this old ID
		return ilwd.ilwdchar(new[0])
	# this old ID was not found in the _idmap_ table; assign a new
	# ID and record it
	new = tbl.get_next_id()
	cursor.execute("INSERT INTO _idmap_ VALUES (?, ?)", (old, new))
	return new
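
# A sketch of how the remapping machinery fits together when merging a
# second document into a database; assumes DBTable.append has been rebound
# to DBTable._remapping_append (see below), and "rows_to_merge" is an
# illustrative iterable.
#
#	idmap_create(connection)
#	for row in rows_to_merge:
#		tbl.append(row)		# row IDs remapped on the fly
#	tbl.applyKeyMapping()		# fix cross-references to remapped IDs
#	idmap_reset(connection)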


def get_max_id(connection, id_class):
	"""
	Given an ilwd:char ID class, return the highest ID from the table
	for whose IDs that is the class.

	Example:

	>>> event_id = ilwd.ilwdchar("sngl_inspiral:event_id:0")
	>>> print(event_id)
	sngl_inspiral:event_id:0
	>>> max_id = get_max_id(connection, type(event_id))
	>>> print(max_id)
	sngl_inspiral:event_id:1054
	"""
	cursor = connection.cursor()
	cursor.execute("SELECT MAX(CAST(SUBSTR(%s, %d, 10) AS INTEGER)) FROM %s" % (id_class.column_name, id_class.index_offset + 1, id_class.table_name))
	maxid = cursor.fetchone()[0]
	cursor.close()
	if maxid is None:
		return None
	return id_class(maxid)


#
# SQL parsing, for reverse-engineering table schemas
#

_sql_create_table_pattern = re.compile(r"CREATE\s+TABLE\s+(?P<name>\w+)\s*\((?P<coldefs>.*)\)", re.IGNORECASE)
_sql_coldef_pattern = re.compile(r"\s*(?P<name>\w+)\s+(?P<type>\w+)[^,]*")


def get_table_names(connection):
	"""
	Return a list of the table names in the database.
	"""
	cursor = connection.cursor()
	cursor.execute("SELECT name FROM sqlite_master WHERE type == 'table'")
	return [name for (name,) in cursor]


def get_column_info(connection, table_name):
	"""
	Return an in-order list of (name, type) tuples describing the
	columns in the given table.
	"""
	cursor = connection.cursor()
	cursor.execute("SELECT sql FROM sqlite_master WHERE type == 'table' AND name == ?", (table_name,))
	statement, = cursor.fetchone()
	coldefs = re.match(_sql_create_table_pattern, statement).groupdict()["coldefs"]
	return [(coldef.groupdict()["name"], coldef.groupdict()["type"]) for coldef in re.finditer(_sql_coldef_pattern, coldefs) if coldef.groupdict()["name"].upper() not in ("PRIMARY", "UNIQUE", "CHECK")]
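
# A minimal sketch: dump the schema of an SQLite database using the two
# helpers above. The filename is illustrative.
#
#	import sqlite3
#	connection = sqlite3.connect("events.sqlite")
#	for name in get_table_names(connection):
#		print(name, get_column_info(connection, name))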


def get_xml(connection, table_names = None):
	...
601 """
602 A special version of the Table class using an SQL database for
603 storage. Many of the features of the Table class are not available
604 here, but instead the user can use SQL to query the table's
605 contents.
606
607 The constraints attribute can be set to a text string that will be
608 added to the table's CREATE statement where constraints go, for
609 example you might wish to set this to "PRIMARY KEY (event_id)" for
610 a table with an event_id column.
611
612 Note: because the table is stored in an SQL database, the use of
613 this class imposes the restriction that table names be unique
614 within a document.
615
616 Also note that at the present time there is really only proper
617 support for the pre-defined tables in the lsctables module. It is
618 possible to load unrecognized tables into a database from LIGO
619 Light Weight XML files, but without developer intervention there is
620 no way to indicate the constraints that should be imposed on the
621 columns, for example which columns should be used as primary keys
622 and so on. This can result in poor query performance. It is also
623 possible to extract a database' contents to a LIGO Light Weight XML
624 file even when the database contains unrecognized tables, but
625 without developer intervention the column types will be guessed
626 using a generic mapping of SQL types to LIGO Light Weight types.
627
628 Each instance of this class must be connected to a database. The
629 (Python DBAPI 2.0 compatible) connection object is passed to the
630 class via the connection parameter at instance creation time.
631
632 Example:
633
634 >>> import sqlite3
635 >>> connection = sqlite3.connection()
636 >>> tbl = dbtables.DBTable(AttributesImpl({u"Name": u"process:table"}), connection = connection)
637
638 A custom content handler must be created in order to pass the
639 connection keyword argument to the DBTable class when instances are
640 created, since the default content handler does not do this. See
641 the use_in() function defined in this module for information on how
642 to create such a content handler
643
644 If a custom glue.ligolw.Table subclass is defined in
645 glue.ligolw.lsctables whose name matches the name of the DBTable
646 being constructed, the lsctables class is added to the list of
647 parent classes. This allows the lsctables class' methods to be
648 used with the DBTable instances but not all of the methods will
649 necessarily work with the database-backed version of the class.
650 Your mileage may vary.
651 """
	def __new__(cls, *args, **kwargs):
		# does this class already carry the table-specific
		# metadata?  if not, try to retrieve it from lsctables
		# by constructing a custom subclass on the fly
		if not hasattr(cls, "tableName"):
			attrs, = args
			name = table.Table.TableName(attrs[u"Name"])
			if name in lsctables.TableByName:
				lsccls = lsctables.TableByName[name]
				class CustomDBTable(cls, lsccls):
					tableName = lsccls.tableName
					validcolumns = lsccls.validcolumns
					loadcolumns = lsccls.loadcolumns
					constraints = lsccls.constraints
					next_id = lsccls.next_id
					RowType = lsccls.RowType
					how_to_index = lsccls.how_to_index

				# save the new class for re-use
				TableByName[name] = CustomDBTable

				# replace the class with the new one
				cls = CustomDBTable
		return table.Table.__new__(cls, *args)

	def __init__(self, *args, **kwargs):
		# chain to the parent class, then extract the database
		# connection from the keyword arguments and pre-allocate
		# a cursor for internal queries
		table.Table.__init__(self, *args)
		self.connection = kwargs.pop("connection")
		self.cursor = self.connection.cursor()

	def copy(self, *args, **kwargs):
		"""
		This method is not implemented. See
		glue.ligolw.table.Table for more information.
		"""
		raise NotImplementedError
	def _end_of_columns(self):
		table.Table._end_of_columns(self)
		# dbcolumnnames and types have the "not loaded" columns
		# removed
		if self.loadcolumns is not None:
			self.dbcolumnnames = [name for name in self.columnnames if name in self.loadcolumns]
			self.dbcolumntypes = [name for i, name in enumerate(self.columntypes) if self.columnnames[i] in self.loadcolumns]
		else:
			self.dbcolumnnames = self.columnnames
			self.dbcolumntypes = self.columntypes

		# create the table
		ToSQLType = {
			"sqlite": ligolwtypes.ToSQLiteType,
			"mysql": ligolwtypes.ToMySQLType
		}[connection_db_type(self.connection)]
		try:
			statement = "CREATE TABLE IF NOT EXISTS " + self.Name + " (" + ", ".join(map(lambda n, t: "%s %s" % (n, ToSQLType[t]), self.dbcolumnnames, self.dbcolumntypes))
		except KeyError as e:
			raise ValueError("column type '%s' not supported" % str(e))
		if self.constraints is not None:
			statement += ", " + self.constraints
		statement += ")"
		self.cursor.execute(statement)

		# record the highest internal row ID
		self.last_maxrowid = self.maxrowid() or 0

		# construct the SQL to be used to insert new rows
		params = {
			"sqlite": ",".join("?" * len(self.dbcolumnnames)),
			"mysql": ",".join(["%s"] * len(self.dbcolumnnames))
		}[connection_db_type(self.connection)]
		self.append_statement = "INSERT INTO %s (%s) VALUES (%s)" % (self.Name, ",".join(self.dbcolumnnames), params)
		self.append_attrgetter = operator.attrgetter(*self.dbcolumnnames)

	def maxrowid(self):
		self.cursor.execute("SELECT MAX(ROWID) FROM %s" % self.Name)
		return self.cursor.fetchone()[0]

	def __len__(self):
		self.cursor.execute("SELECT COUNT(*) FROM %s" % self.Name)
		return self.cursor.fetchone()[0]

	def _append(self, row):
		"""
		Standard .append() method. This method is intended for
		internal use only.
		"""
		self.cursor.execute(self.append_statement, self.append_attrgetter(row))

	def _remapping_append(self, row):
		"""
		Replacement for the standard .append() method. This
		version performs on the fly row ID reassignment, and so
		also performs the function of the updateKeyMapping()
		method. SQLite does not permit the PRIMARY KEY of a row to
		be modified, so it needs to be done prior to insertion.
		This method is intended for internal use only.
		"""
		if self.next_id is not None:
			# assign (and record) a new ID before insertion
			# because SQLite doesn't allow a PRIMARY KEY to
			# be changed afterwards
			setattr(row, self.next_id.column_name, idmap_get_new(self.connection, getattr(row, self.next_id.column_name), self))
		self._append(row)

	# default append method; document-merging code rebinds this to
	# _remapping_append
	append = _append

	def row_from_cols(self, values):
		"""
		Given an iterable of values in the order of columns in the
		database, construct and return a row object. This is a
		convenience function for turning the results of database
		queries into Python objects.
		"""
		row = self.RowType()
		for c, t, v in zip(self.dbcolumnnames, self.dbcolumntypes, values):
			if t in ligolwtypes.IDTypes:
				v = ilwd.ilwdchar(v)
			setattr(row, c, v)
		return row
	# backwards compatibility
	_row_from_cols = row_from_cols
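
	# A minimal sketch: convert query results into row objects; the
	# table and column names in the query are illustrative.
	#
	#	for values in tbl.cursor.execute("SELECT * FROM sngl_inspiral WHERE snr > 8"):
	#		row = tbl.row_from_cols(values)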

	def applyKeyMapping(self):
		"""
		Used as the second half of the key reassignment algorithm.
		Loops over each row in the table, replacing references to
		old row keys with the new values from the _idmap_ table.
		"""
		assignments = ", ".join("%s = (SELECT new FROM _idmap_ WHERE old == %s)" % (colname, colname) for coltype, colname in zip(self.dbcolumntypes, self.dbcolumnnames) if coltype in ligolwtypes.IDTypes and (self.next_id is None or colname != self.next_id.column_name))
		if assignments:
			# only rows appended since the last update can
			# contain references to old IDs, and SQLite's
			# ROWID increases monotonically for rows inserted
			# in sequence, so the update can be restricted to
			# ROWIDs above the recorded high-water mark
			self.cursor.execute("UPDATE %s SET %s WHERE ROWID > %d" % (self.Name, assignments, self.last_maxrowid))
			self.last_maxrowid = self.maxrowid() or 0


class ProcessParamsTable(DBTable):
	tableName = lsctables.ProcessParamsTable.tableName
	validcolumns = lsctables.ProcessParamsTable.validcolumns
	constraints = lsctables.ProcessParamsTable.constraints
	next_id = lsctables.ProcessParamsTable.next_id
	RowType = lsctables.ProcessParamsTable.RowType
	how_to_index = lsctables.ProcessParamsTable.how_to_index

	def append(self, row):
		if row.type not in ligolwtypes.Types:
			raise ligolw.ElementError("unrecognized type '%s'" % row.type)
		DBTable.append(self, row)


class TimeSlideTable(DBTable):
	tableName = lsctables.TimeSlideTable.tableName
	validcolumns = lsctables.TimeSlideTable.validcolumns
	constraints = lsctables.TimeSlideTable.constraints
	next_id = lsctables.TimeSlideTable.next_id
	RowType = lsctables.TimeSlideTable.RowType
	how_to_index = lsctables.TimeSlideTable.how_to_index
	def as_dict(self):
		"""
		Return a dictionary mapping time slide IDs to offset
		dictionaries.
		"""
		return dict((ilwd.ilwdchar(time_slide_id), offsetvector.offsetvector((instrument, offset) for time_slide_id, instrument, offset in values)) for time_slide_id, values in itertools.groupby(self.cursor.execute("SELECT time_slide_id, instrument, offset FROM time_slide ORDER BY time_slide_id"), operator.itemgetter(0)))

	def get_time_slide_id(self, offsetdict, create_new = None, superset_ok = False, nonunique_ok = False):
877 """
878 Return the time_slide_id corresponding to the offset vector
879 described by offsetdict, a dictionary of instrument/offset
880 pairs.
881
882 If the optional create_new argument is None (the default),
883 then the table must contain a matching offset vector. The
884 return value is the ID of that vector. If the table does
885 not contain a matching offset vector then KeyError is
886 raised.
887
888 If the optional create_new argument is set to a Process
889 object (or any other object with a process_id attribute),
890 then if the table does not contain a matching offset vector
891 a new one will be added to the table and marked as having
892 been created by the given process. The return value is the
893 ID of the (possibly newly created) matching offset vector.
894
895 If the optional superset_ok argument is False (the default)
896 then an offset vector in the table is considered to "match"
897 the requested offset vector only if they contain the exact
898 same set of instruments. If the superset_ok argument is
899 True, then an offset vector in the table is considered to
900 match the requested offset vector as long as it provides
901 the same offsets for the same instruments as the requested
902 vector, even if it provides offsets for other instruments
903 as well.
904
905 More than one offset vector in the table might match the
906 requested vector. If the optional nonunique_ok argument is
907 False (the default), then KeyError will be raised if more
908 than one offset vector in the table is found to match the
909 requested vector. If the optional nonunique_ok is True
910 then the return value is the ID of one of the matching
911 offset vectors selected at random.
912 """
913
914 if superset_ok:
915 ids = [id for id, slide in self.as_dict().items() if offsetdict == dict((instrument, offset) for instrument, offset in slide.items() if instrument in offsetdict)]
916 else:
917 ids = [id for id, slide in self.as_dict().items() if offsetdict == slide]
918 if len(ids) > 1:
919
920 if nonunique_ok:
921
922 return ids[0]
923
924 raise KeyError(offsetdict)
925 if len(ids) == 1:
926
927 return ids[0]
928
929 if create_new is None:
930
931 raise KeyError(offsetdict)
932
933 id = self.get_next_id()
934 for instrument, offset in offsetdict.items():
935 row = self.RowType()
936 row.process_id = create_new.process_id
937 row.time_slide_id = id
938 row.instrument = instrument
939 row.offset = offset
940 self.append(row)
941
942
943 return id
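
	# A minimal sketch: fetch-or-create the ID of a zero-lag H1,L1
	# offset vector; "process" stands for any object with a
	# process_id attribute.
	#
	#	time_slide_id = tbl.get_time_slide_id({"H1": 0.0, "L1": 0.0}, create_new = process)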


#
# Table name ---> table type mapping.
#


TableByName = {
	ProcessParamsTable.tableName: ProcessParamsTable,
	TimeSlideTable.tableName: TimeSlideTable
}


def use_in(ContentHandler):
	"""
	Modify ContentHandler, a sub-class of
	glue.ligolw.LIGOLWContentHandler, to cause it to use the DBTable
	class defined in this module when parsing XML documents. Instances
	of the class must provide a connection attribute. When a document
	is parsed, the value of this attribute will be passed to the
	DBTable class' .__init__() method as each table object is created,
	and thus sets the database connection for all table objects in the
	document.

	Example:

	>>> import sqlite3
	>>> from glue.ligolw import ligolw
	>>> class MyContentHandler(ligolw.LIGOLWContentHandler):
	...	def __init__(self, *args):
	...		super(MyContentHandler, self).__init__(*args)
	...		self.connection = sqlite3.connect("database.sqlite")
	...
	>>> use_in(MyContentHandler)

	Multiple database files can be in use at once by creating a content
	handler class for each one.
	"""
	ContentHandler = lsctables.use_in(ContentHandler)

	def startTable(self, parent, attrs):
		name = table.Table.TableName(attrs[u"Name"])
		if name in TableByName:
			return TableByName[name](attrs, connection = self.connection)
		return DBTable(attrs, connection = self.connection)

	ContentHandler.startTable = startTable

	return ContentHandler
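
# A minimal sketch: load a LIGO Light Weight XML document into an SQLite
# database through a customized content handler. The filenames are
# illustrative, and glue.ligolw.utils.load_filename() accepting a
# contenthandler keyword argument is assumed to be available.
#
#	import sqlite3
#	from glue.ligolw import ligolw
#	from glue.ligolw import utils as ligolw_utils
#
#	@use_in
#	class DBContentHandler(ligolw.LIGOLWContentHandler):
#		connection = sqlite3.connect("events.sqlite")
#
#	xmldoc = ligolw_utils.load_filename("events.xml.gz", contenthandler = DBContentHandler)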