##// END OF EJS Templates
transaction: allow generating file outside of store...
Pierre-Yves David -
r22663:4c619873 default
parent child Browse files
Show More
@@ -1,348 +1,356
1 # transaction.py - simple journaling scheme for mercurial
1 # transaction.py - simple journaling scheme for mercurial
2 #
2 #
3 # This transaction scheme is intended to gracefully handle program
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
7 # anything that isn't referenced in the changelog.
8 #
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
10 #
11 # This software may be used and distributed according to the terms of the
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
12 # GNU General Public License version 2 or any later version.
13
13
14 from i18n import _
14 from i18n import _
15 import errno
15 import errno
16 import error, util
16 import error, util
17
17
def active(func):
    """Decorator: only allow `func` to run while the transaction is live.

    A transaction is considered live while its reference count is
    non-zero; once committed or aborted the count drops to zero and any
    further use is a programming error reported via error.Abort.
    """
    def _active(self, *args, **kwds):
        if self.count != 0:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
25
25
26 def _playback(journal, report, opener, entries, backupentries, unlink=True):
26 def _playback(journal, report, opener, entries, backupentries, unlink=True):
27 for f, o, _ignore in entries:
27 for f, o, _ignore in entries:
28 if o or not unlink:
28 if o or not unlink:
29 try:
29 try:
30 fp = opener(f, 'a')
30 fp = opener(f, 'a')
31 fp.truncate(o)
31 fp.truncate(o)
32 fp.close()
32 fp.close()
33 except IOError:
33 except IOError:
34 report(_("failed to truncate %s\n") % f)
34 report(_("failed to truncate %s\n") % f)
35 raise
35 raise
36 else:
36 else:
37 try:
37 try:
38 opener.unlink(f)
38 opener.unlink(f)
39 except (IOError, OSError), inst:
39 except (IOError, OSError), inst:
40 if inst.errno != errno.ENOENT:
40 if inst.errno != errno.ENOENT:
41 raise
41 raise
42
42
43 backupfiles = []
43 backupfiles = []
44 for f, b, _ignore in backupentries:
44 for f, b, _ignore in backupentries:
45 filepath = opener.join(f)
45 filepath = opener.join(f)
46 backuppath = opener.join(b)
46 backuppath = opener.join(b)
47 try:
47 try:
48 util.copyfile(backuppath, filepath)
48 util.copyfile(backuppath, filepath)
49 backupfiles.append(b)
49 backupfiles.append(b)
50 except IOError:
50 except IOError:
51 report(_("failed to recover %s\n") % f)
51 report(_("failed to recover %s\n") % f)
52 raise
52 raise
53
53
54 opener.unlink(journal)
54 opener.unlink(journal)
55 backuppath = "%s.backupfiles" % journal
55 backuppath = "%s.backupfiles" % journal
56 if opener.exists(backuppath):
56 if opener.exists(backuppath):
57 opener.unlink(backuppath)
57 opener.unlink(backuppath)
58 for f in backupfiles:
58 for f in backupfiles:
59 opener.unlink(f)
59 opener.unlink(f)
60
60
class transaction(object):
    """Journal-backed transaction over a set of repository files.

    Writes are registered with add()/addbackup(); on abort the journal is
    replayed to truncate or restore every touched file, on close() the
    journal and backups are discarded.
    """
    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        self.count = 1    # nesting depth; 0 once committed/aborted
        self.usages = 1   # release() bookkeeping, see release()
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        self.entries = []        # (file, offset, data) truncation records
        self.backupentries = []  # (file, backupfile, data) restore records
        self.map = {}            # file -> index into self.entries
        self.backupmap = {}      # file -> index into self.backupentries
        self.journal = journal
        self._queue = []         # pending (entries, backupentries) groups
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self.backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self.backupsfile = opener.open(self.backupjournal, 'w')
        if createmode is not None:
            # 0o666 (== 0666) spelled with the 2.6+/3.x octal prefix
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self.backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}

    def __del__(self):
        # a journal still being set means the transaction was neither
        # closed nor aborted explicitly; roll it back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        # open a new queue entry; add()/addbackup() records accumulate
        # there until endgroup() flushes them to the journal atomically
        self._queue.append(([], []))

    @active
    def endgroup(self):
        q = self._queue.pop()
        self.entries.extend(q[0])
        self.backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b, _data in q[1]:
            backups.append((f, b))

        # journal format: one "file\0offset\n" record per entry
        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        # backup journal format: "file\0backupfile\0" pairs
        d = ''.join(['%s\0%s\0' % (f, b) for f, b in backups])
        self.backupsfile.write(d)
        self.backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """Record that `file` must be truncated back to `offset` on abort."""
        if file in self.map or file in self.backupmap:
            return
        if self._queue:
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `vfs`: the vfs the source file lives in; defaults to the
          transaction's own opener (the store)
        """

        if file in self.map or file in self.backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            # the backup copy itself always lives next to the journal,
            # i.e. in the transaction's own opener
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: aborting simply removes it
            self.add(file, 0)
            return

        if self._queue:
            self._queue[-1][1].append((file, backupfile))
            return

        self.backupentries.append((file, backupfile, None))
        self.backupmap[file] = len(self.backupentries) - 1
        self.backupsfile.write("%s\0%s\0" % (file, backupfile))
        self.backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.

        The `vfs` argument may be used to generate the files outside of the
        store; only the bookmarks file is supported for now.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    @active
    def find(self, file):
        """Return the journal entry recorded for `file`, or None."""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self.backupmap:
            return self.backupentries[self.backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        # re-entering the transaction: bump both counters and hand the
        # same object back to the nested scope
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        return self.count > 0

    @active
    def close(self):
        '''commit the transaction'''
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

        if self.count == 1 and self.onclose is not None:
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still nested; the outermost close() does the real commit
            return
        self.file.close()
        self.backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self.backupjournal):
            self.opener.unlink(self.backupjournal)
        for _f, b, _ignore in self.backupentries:
            self.opener.unlink(b)
        self.backupentries = []
        # journal = None marks the transaction as finished for __del__
        self.journal = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self.backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self.backupentries:
                # nothing was written: just drop the journal files
                if self.journal:
                    self.opener.unlink(self.journal)
                if self.backupjournal:
                    self.opener.unlink(self.backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self.backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
311
319
312
320
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file. The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        data = fp.read()
        # close the handle instead of leaking it
        fp.close()
        if len(data) > 0:
            parts = data.split('\0')
            # records are written as "file\0backupfile\0" pairs, so the
            # trailing separator leaves an empty final element; stop at
            # len(parts) - 1 and take a two-element slice per record
            # (the previous parts[i:i + 1] one-element slice could never
            # unpack into two names)
            for i in xrange(0, len(parts) - 1, 2):
                f, b = parts[i:i + 2]
                backupentries.append((f, b, None))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now