transactions: add version number to journal.backupfiles...
Durham Goode
r23064:5dc888b7 stable
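Context for the change below: journal.backupfiles now begins with a decimal version number on its first line, ahead of the usual file\0backupfile records, and rollback only trusts those records when the version matches the one it understands (otherwise it reports that a newer Mercurial wrote the journal and skips the backups). The following is a minimal standalone sketch of that format, assuming version 1; write_backupfiles/read_backupfiles are hypothetical helper names for illustration, not Mercurial API.

# Sketch: the journal.backupfiles layout with the new version header.
# Not Mercurial code; helper names are made up for illustration.

EXPECTED_VERSION = 1   # mirrors `version = 1` in transaction.py

def write_backupfiles(path, entries):
    # version line first, then file\0backupfile pairs, each ending in '\0'
    fp = open(path, 'w')
    try:
        fp.write('%d\n' % EXPECTED_VERSION)
        for name, backup in entries:
            fp.write('%s\0%s\0' % (name, backup))
    finally:
        fp.close()

def read_backupfiles(path):
    # return (file, backup) pairs, or [] when the version is unknown
    fp = open(path)
    try:
        data = fp.read()
    finally:
        fp.close()
    versionend = data.find('\n')
    if versionend == -1:
        return []
    ver, data = data[:versionend], data[versionend + 1:]
    if ver != str(EXPECTED_VERSION):
        return []   # written in a newer format; ignore rather than misparse
    parts = data.split('\0')
    # the trailing '\0' leaves an empty final element; skip it
    return [(parts[i], parts[i + 1]) for i in range(0, len(parts) - 1, 2)]

For example, write_backupfiles('journal.backupfiles', [('data/foo.i', 'journal.backup.data/foo.i')]) round-trips through read_backupfiles().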
@@ -1,357 +1,370 @@
# transaction.py - simple journaling scheme for mercurial
#
# This transaction scheme is intended to gracefully handle program
# errors and interruptions. More serious failures like system crashes
# can be recovered with an fsck-like tool. As the whole repository is
# effectively log-structured, this should amount to simply truncating
# anything that isn't referenced in the changelog.
#
# Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.

from i18n import _
import errno
import error, util

+version = 1
+
def active(func):
    def _active(self, *args, **kwds):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwds)
    return _active

def _playback(journal, report, opener, entries, backupentries, unlink=True):
    for f, o, _ignore in entries:
        if o or not unlink:
            try:
                fp = opener(f, 'a')
                fp.truncate(o)
                fp.close()
            except IOError:
                report(_("failed to truncate %s\n") % f)
                raise
        else:
            try:
                opener.unlink(f)
            except (IOError, OSError), inst:
                if inst.errno != errno.ENOENT:
                    raise

    backupfiles = []
    for f, b, _ignore in backupentries:
        filepath = opener.join(f)
        backuppath = opener.join(b)
        try:
            util.copyfile(backuppath, filepath)
            backupfiles.append(b)
        except IOError:
            report(_("failed to recover %s\n") % f)
            raise

    opener.unlink(journal)
    backuppath = "%s.backupfiles" % journal
    if opener.exists(backuppath):
        opener.unlink(backuppath)
    for f in backupfiles:
        opener.unlink(f)

class transaction(object):
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []
        self.backupentries = []
        self.map = {}
        self.backupmap = {}
        self.journal = journal
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self.backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self.backupsfile = opener.open(self.backupjournal, 'w')
+        self.backupsfile.write('%d\n' % version)
        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self.backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}

    def __del__(self):
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        self._queue.append(([], []))

    @active
    def endgroup(self):
        q = self._queue.pop()
        self.entries.extend(q[0])
        self.backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b, _data in q[1]:
            backups.append((f, b))

        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        d = ''.join(['%s\0%s\0' % (f, b) for f, b in backups])
        self.backupsfile.write(d)
        self.backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        if file in self.map or file in self.backupmap:
            return
        if self._queue:
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self.backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            self.add(file, 0)
            return

        if self._queue:
            self._queue[-1][1].append((file, backupfile))
            return

        self.backupentries.append((file, backupfile, None))
        self.backupmap[file] = len(self.backupentries) - 1
        self.backupsfile.write("%s\0%s\0" % (file, backupfile))
        self.backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    @active
    def find(self, file):
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self.backupmap:
            return self.backupentries[self.backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    @active
    def close(self):
        '''commit the transaction'''
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

        if self.count == 1 and self.onclose is not None:
            self.onclose()

        self.count -= 1
        if self.count != 0:
            return
        self.file.close()
        self.backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self.backupjournal):
            self.opener.unlink(self.backupjournal)
            for _f, b, _ignore in self.backupentries:
                self.opener.unlink(b)
        self.backupentries = []
        self.journal = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self.backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self.backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self.backupjournal:
                    self.opener.unlink(self.backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self.backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None


def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        data = fp.read()
        if len(data) > 0:
+            ver = version
+            versionend = data.find('\n')
+            if versionend != -1:
+                ver = data[:versionend]
+                data = data[versionend + 1:]
+
+            if ver == str(version):
                parts = data.split('\0')
                # Skip the final part, since it's just a trailing empty space
                for i in xrange(0, len(parts) - 1, 2):
                    f, b = parts[i:i + 2]
                    backupentries.append((f, b, None))
+            else:
+                report(_("journal was created by a newer version of "
+                         "Mercurial"))

    _playback(file, report, opener, entries, backupentries)
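Aside: the main journal enables rollback by truncation, as rollback()'s docstring above describes. Each file touched by the transaction is recorded together with its pre-transaction size, and playback truncates it back to that offset, unlinking files that did not exist before. Below is a self-contained toy version of that idea, using plain os calls in place of Mercurial's opener/vfs layer; the helper names record/playback are made up for illustration.

import os

# Toy illustration of offset-journal rollback (not Mercurial's implementation).

def record(journalpath, filepath):
    # remember the file's current size before the transaction appends to it
    size = os.path.getsize(filepath) if os.path.exists(filepath) else 0
    j = open(journalpath, 'a')
    try:
        j.write('%s\0%d\n' % (filepath, size))
    finally:
        j.close()

def playback(journalpath):
    # undo the transaction: truncate each file back to its recorded offset,
    # removing files whose recorded size was 0 (they did not exist before)
    j = open(journalpath)
    try:
        lines = j.readlines()
    finally:
        j.close()
    for line in lines:
        f, o = line.rstrip('\n').split('\0')
        o = int(o)
        if o == 0:
            if os.path.exists(f):
                os.unlink(f)
        else:
            fp = open(f, 'a')
            try:
                fp.truncate(o)
            finally:
                fp.close()
    os.unlink(journalpath)

For example, calling record('journal', 'data/somefile.i') before appending to that file, and playback('journal') after a failure, restores the old length.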
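Similarly, for files that cannot be restored by truncation, addbackup() copies (or hardlinks) the current contents aside and records a file\0backupfile pair; on abort the backup is copied back over the modified file. A minimal sketch of that restore path, with hypothetical helper names, plain shutil in place of util.copyfile, and no version header (see the sketch above the diff for the versioned journal.backupfiles format):

import os
import shutil

# Toy backup/restore pair mirroring addbackup()/_playback()'s backup handling.

def addbackup(backupjournal, filepath, backuppath):
    # snapshot the file before the transaction modifies it in place
    shutil.copyfile(filepath, backuppath)
    j = open(backupjournal, 'a')
    try:
        j.write('%s\0%s\0' % (filepath, backuppath))
    finally:
        j.close()

def restore(backupjournal):
    # copy every backup over its (possibly modified) original, then clean up;
    # this toy journal has no version line, so the whole file is '\0' pairs
    j = open(backupjournal)
    try:
        parts = j.read().split('\0')
    finally:
        j.close()
    for i in range(0, len(parts) - 1, 2):
        f, b = parts[i], parts[i + 1]
        shutil.copyfile(b, f)
        os.unlink(b)
    os.unlink(backupjournal)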