##// END OF EJS Templates
transaction: only generate file when we actually close the transaction...
Pierre-Yves David -
r23103:29bfa964 stable
parent child Browse files
Show More
@@ -1,368 +1,368 b''
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
# Version of the on-disk backupfiles journal format, written as the first
# line of the '<journal>.backupfiles' file and checked on rollback.
version = 1

def active(func):
    """Decorator for transaction methods that require a live transaction.

    The wrapped method raises error.Abort when the transaction has already
    been committed or aborted (i.e. ``self.count == 0``); otherwise it
    simply forwards to ``func``.
    """
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active
def _playback(journal, report, opener, entries, backupentries, unlink=True):
    """Undo journaled writes: truncate files back to their recorded offsets,
    restore backed-up copies, then remove the journal files themselves.

    * `journal`: journal file name, relative to the opener root
    * `report`: callable used to emit progress/error messages
    * `opener`: vfs-like object (callable open, plus unlink/join/exists)
    * `entries`: (file, offset, ignored) triples; each file is truncated
      back to `offset`
    * `backupentries`: (file, backupfile, ignored) triples; each file is
      restored by copying its backup over it
    * `unlink`: when True, an entry with offset 0 removes the file entirely
      instead of truncating it to zero length
    """
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                # open in append mode so the file is created if missing,
                # then cut it back to the journaled offset
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError) as inst:
                # the file may already be gone; only a missing file is benign
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for f, b, _ignore in backupentries:
        filepath = opener.join(f)
        backuppath = opener.join(b)
        try:
            util.copyfile(backuppath, filepath)
            # only delete backups we actually restored from
            backupfiles.append(b)
        except IOError:
            report(_("failed to recover %s\n") % f)
            raise

    # playback succeeded: remove the journal, its backupfiles companion,
    # and every restored backup copy
    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    for f in backupfiles:
        opener.unlink(f)
class transaction(object):
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event
        of an exception.

        * `report`: callable used to emit progress/error messages
        * `opener`: vfs-like object used for all file access
        * `journal`: name of the journal file, relative to the opener root
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any
        files have been truncated
        """
        self.count = 1          # nesting depth; 0 means committed/aborted
        self.usages = 1         # outstanding references (see nest/release)
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []       # (file, offset, data) truncation records
        self.backupentries = [] # (file, backupfile, data) backup records
        self.map = {}           # file -> index into self.entries
        self.backupmap = {}     # file -> index into self.backupentries
        self.journal = journal
        self._queue = []        # pending (entries, backupentries) groups
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self.backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self.backupsfile = opener.open(self.backupjournal, 'w')
        # first line of the backup journal records the format version
        self.backupsfile.write('%d\n' % version)
        if createmode is not None:
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self.backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}

    def __del__(self):
        # a journal still set means the transaction was never closed
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """Start buffering add/addbackup calls until endgroup()."""
        self._queue.append(([], []))

    @active
    def endgroup(self):
        """Flush the current group's entries to the journals atomically."""
        q = self._queue.pop()
        self.entries.extend(q[0])
        self.backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b, _data in q[1]:
            backups.append((f, b))

        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
        self.backupsfile.write(d)
        self.backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """Record that `file` must be truncated back to `offset` on abort."""
        if file in self.map or file in self.backupmap:
            return
        if self._queue:
            # inside a group: buffer until endgroup()
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self.backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # the file does not exist yet: journal a truncate-to-zero instead
            self.add(file, 0)
            return

        if self._queue:
            # inside a group: buffer until endgroup()
            self._queue[-1][1].append((file, backupfile))
            return

        self.backupentries.append((file, backupfile, None))
        self.backupmap[file] = len(self.backupentries) - 1
        self.backupsfile.write("%s\0%s\n" % (file, backupfile))
        self.backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    def _generatefiles(self):
        # write files registered for generation, ordered by their `order` key
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                # always close the atomictemp files, even if genfunc raised
                for f in files:
                    f.close()

    @active
    def find(self, file):
        """Return the journal entry for `file`, or None if not journaled."""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self.backupmap:
            return self.backupentries[self.backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new offset; rollback replays the last value
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """Enter a nested transaction scope; returns self."""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """Drop one usage; abort if all scopes are gone without a close()."""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        """Return True while the transaction is neither committed nor
        aborted."""
        return self.count > 0

    @active
    def close(self):
        '''commit the transaction'''
        if self.count == 1 and self.onclose is not None:
            # only generate files when we actually close the transaction
            self._generatefiles()
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still nested: the outermost close() performs the real commit
            return
        self.file.close()
        self.backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self.backupjournal):
            self.opener.unlink(self.backupjournal)
        for _f, b, _ignore in self.backupentries:
            self.opener.unlink(b)
        self.backupentries = []
        self.journal = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        # mark the transaction dead before touching files so re-entry via
        # @active methods is impossible
        self.count = 0
        self.usages = 0
        self.file.close()
        self.backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and self.backupentries:
                pass
            if not self.entries and not self.backupentries:
                # nothing was written: just drop the (empty) journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self.backupjournal:
                    self.opener.unlink(self.backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self.backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
324
324
325
325
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            # tolerate a corrupt trailing entry: report and keep going
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        lines = fp.readlines()
        # close the backup journal too (it was previously leaked)
        fp.close()
        if lines:
            # first line is the journal format version
            ver = lines[0][:-1]
            if ver == str(version):
                for line in lines[1:]:
                    if line:
                        # Shave off the trailing newline
                        line = line[:-1]
                        f, b = line.split('\0')
                        backupentries.append((f, b, None))
            else:
                report(_("journal was created by a newer version of "
                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now