transaction: only generate file when we actually close the transaction...
Pierre-Yves David
r23103:29bfa964 stable
@@ -1,368 +1,368 @@
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
18 18 version = 1
19 19
20 20 def active(func):
21 21 def _active(self, *args, **kwds):
22 22 if self.count == 0:
23 23 raise error.Abort(_(
24 24 'cannot use transaction when it is already committed/aborted'))
25 25 return func(self, *args, **kwds)
26 26 return _active
27 27
28 28 def _playback(journal, report, opener, entries, backupentries, unlink=True):
29 29 for f, o, _ignore in entries:
30 30 if o or not unlink:
31 31 try:
32 32 fp = opener(f, 'a')
33 33 fp.truncate(o)
34 34 fp.close()
35 35 except IOError:
36 36 report(_("failed to truncate %s\n") % f)
37 37 raise
38 38 else:
39 39 try:
40 40 opener.unlink(f)
41 41 except (IOError, OSError), inst:
42 42 if inst.errno != errno.ENOENT:
43 43 raise
44 44
45 45 backupfiles = []
46 46 for f, b, _ignore in backupentries:
47 47 filepath = opener.join(f)
48 48 backuppath = opener.join(b)
49 49 try:
50 50 util.copyfile(backuppath, filepath)
51 51 backupfiles.append(b)
52 52 except IOError:
53 53 report(_("failed to recover %s\n") % f)
54 54 raise
55 55
56 56 opener.unlink(journal)
57 57 backuppath = "%s.backupfiles" % journal
58 58 if opener.exists(backuppath):
59 59 opener.unlink(backuppath)
60 60 for f in backupfiles:
61 61 opener.unlink(f)
62 62
63 63 class transaction(object):
64 64 def __init__(self, report, opener, journal, after=None, createmode=None,
65 65 onclose=None, onabort=None):
66 66 """Begin a new transaction
67 67
68 68 Begins a new transaction that allows rolling back writes in the event of
69 69 an exception.
70 70
71 71 * `after`: called after the transaction has been committed
72 72 * `createmode`: the mode of the journal file that will be created
73 73 * `onclose`: called as the transaction is closing, but before it is
74 74 closed
75 75 * `onabort`: called as the transaction is aborting, but before any files
76 76 have been truncated
77 77 """
78 78 self.count = 1
79 79 self.usages = 1
80 80 self.report = report
81 81 self.opener = opener
82 82 self.after = after
83 83 self.onclose = onclose
84 84 self.onabort = onabort
85 85 self.entries = []
86 86 self.backupentries = []
87 87 self.map = {}
88 88 self.backupmap = {}
89 89 self.journal = journal
90 90 self._queue = []
91 91 # a dict of arguments to be passed to hooks
92 92 self.hookargs = {}
93 93
94 94 self.backupjournal = "%s.backupfiles" % journal
95 95 self.file = opener.open(self.journal, "w")
96 96 self.backupsfile = opener.open(self.backupjournal, 'w')
97 97 self.backupsfile.write('%d\n' % version)
98 98 if createmode is not None:
99 99 opener.chmod(self.journal, createmode & 0666)
100 100 opener.chmod(self.backupjournal, createmode & 0666)
101 101
102 102 # hold file generations to be performed on commit
103 103 self._filegenerators = {}
104 104
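A minimal usage sketch of the lifecycle set up by this constructor, assuming a vfs-style `opener` and a `report` callable as used elsewhere in Mercurial; the file name, callbacks and helper are hypothetical and not part of this change:

# Illustrative sketch only -- not part of transaction.py.
def example(opener, report, oldsize):
    tr = transaction(report, opener, "journal",
                     after=lambda: report("committed\n"),
                     onclose=lambda: report("closing\n"),
                     onabort=lambda: report("aborting\n"))
    try:
        tr.add("data/somefile.i", oldsize)  # journal the pre-write length
        # ... append to data/somefile.i through the opener ...
        tr.close()      # onclose(), then after(), then journal cleanup
    except Exception:
        tr.abort()      # onabort(), then truncate back to oldsize
        raise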
105 105 def __del__(self):
106 106 if self.journal:
107 107 self._abort()
108 108
109 109 @active
110 110 def startgroup(self):
111 111 self._queue.append(([], []))
112 112
113 113 @active
114 114 def endgroup(self):
115 115 q = self._queue.pop()
116 116 self.entries.extend(q[0])
117 117 self.backupentries.extend(q[1])
118 118
119 119 offsets = []
120 120 backups = []
121 121 for f, o, _data in q[0]:
122 122 offsets.append((f, o))
123 123
124 124 for f, b, _data in q[1]:
125 125 backups.append((f, b))
126 126
127 127 d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
128 128 self.file.write(d)
129 129 self.file.flush()
130 130
131 131 d = ''.join(['%s\0%s\n' % (f, b) for f, b in backups])
132 132 self.backupsfile.write(d)
133 133 self.backupsfile.flush()
134 134
135 135 @active
136 136 def add(self, file, offset, data=None):
137 137 if file in self.map or file in self.backupmap:
138 138 return
139 139 if self._queue:
140 140 self._queue[-1][0].append((file, offset, data))
141 141 return
142 142
143 143 self.entries.append((file, offset, data))
144 144 self.map[file] = len(self.entries) - 1
145 145 # add enough data to the journal to do the truncate
146 146 self.file.write("%s\0%d\n" % (file, offset))
147 147 self.file.flush()
148 148
149 149 @active
150 150 def addbackup(self, file, hardlink=True, vfs=None):
151 151 """Adds a backup of the file to the transaction
152 152
153 153 Calling addbackup() creates a hardlink backup of the specified file
154 154 that is used to recover the file in the event of the transaction
155 155 aborting.
156 156
157 157 * `file`: the file path, relative to .hg/store
158 158 * `hardlink`: use a hardlink to quickly create the backup
159 159 """
160 160
161 161 if file in self.map or file in self.backupmap:
162 162 return
163 163 backupfile = "%s.backup.%s" % (self.journal, file)
164 164 if vfs is None:
165 165 vfs = self.opener
166 166 if vfs.exists(file):
167 167 filepath = vfs.join(file)
168 168 backuppath = self.opener.join(backupfile)
169 169 util.copyfiles(filepath, backuppath, hardlink=hardlink)
170 170 else:
171 171 self.add(file, 0)
172 172 return
173 173
174 174 if self._queue:
175 175 self._queue[-1][1].append((file, backupfile))
176 176 return
177 177
178 178 self.backupentries.append((file, backupfile, None))
179 179 self.backupmap[file] = len(self.backupentries) - 1
180 180 self.backupsfile.write("%s\0%s\n" % (file, backupfile))
181 181 self.backupsfile.flush()
182 182
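A short sketch contrasting add() with addbackup(); `tr` is an open transaction and the file names are hypothetical:

# Illustrative sketch only -- not part of transaction.py.
def journalchanges(tr, oldsize):
    # add(): for append-only files, recording the old length is enough;
    # aborting truncates the file back to that offset.
    tr.add("data/somefile.i", oldsize)
    # addbackup(): a file rewritten in place gets a (hardlink) copy next
    # to the journal and is copied back over the original on abort.
    tr.addbackup("somestatefile")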
183 183 @active
184 184 def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
185 185 """add a function to generates some files at transaction commit
186 186
187 187 The `genfunc` argument is a function capable of generating proper
188 188 content of each entry in the `filename` tuple.
189 189
190 190 At transaction close time, `genfunc` will be called with one file
191 191 object argument per entries in `filenames`.
192 192
193 193 The transaction itself is responsible for the backup, creation and
194 194 final write of such file.
195 195
196 196 The `genid` argument is used to ensure the same set of file is only
197 197 generated once. Call to `addfilegenerator` for a `genid` already
198 198 present will overwrite the old entry.
199 199
200 200 The `order` argument may be used to control the order in which multiple
201 201 generator will be executed.
202 202 """
203 203 # For now, we are unable to do proper backup and restore of custom vfs,
204 204 # except for bookmarks, which are handled outside this mechanism.
205 205 assert vfs is None or filenames == ('bookmarks',)
206 206 self._filegenerators[genid] = (order, filenames, genfunc, vfs)
207 207
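A sketch of how a caller might register a generator, as described in the docstring above; `tr`, the 'somestate' file name and its content are hypothetical:

# Illustrative sketch only -- not part of transaction.py.
def registerstate(tr):
    def writestate(fp):
        # one open file object is passed per name in `filenames`
        fp.write("final content computed at close time\n")
    # nothing is written yet; when the transaction is actually closed,
    # _generatefiles() backs up 'somestate', reopens it with
    # atomictemp=True and hands the file object to writestate.
    tr.addfilegenerator('somestate', ('somestate',), writestate, order=0)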
208 208 def _generatefiles(self):
209 209 # write files registered for generation
210 210 for entry in sorted(self._filegenerators.values()):
211 211 order, filenames, genfunc, vfs = entry
212 212 if vfs is None:
213 213 vfs = self.opener
214 214 files = []
215 215 try:
216 216 for name in filenames:
217 217 # Some files are already backed up when creating the
218 218 # localrepo. Until this is properly fixed we disable the
219 219 # backup for them.
220 220 if name not in ('phaseroots', 'bookmarks'):
221 221 self.addbackup(name)
222 222 files.append(vfs(name, 'w', atomictemp=True))
223 223 genfunc(*files)
224 224 finally:
225 225 for f in files:
226 226 f.close()
227 227
228 228 @active
229 229 def find(self, file):
230 230 if file in self.map:
231 231 return self.entries[self.map[file]]
232 232 if file in self.backupmap:
233 233 return self.backupentries[self.backupmap[file]]
234 234 return None
235 235
236 236 @active
237 237 def replace(self, file, offset, data=None):
238 238 '''
239 239 replace can only replace already committed entries
240 240 that are not pending in the queue
241 241 '''
242 242
243 243 if file not in self.map:
244 244 raise KeyError(file)
245 245 index = self.map[file]
246 246 self.entries[index] = (file, offset, data)
247 247 self.file.write("%s\0%d\n" % (file, offset))
248 248 self.file.flush()
249 249
250 250 @active
251 251 def nest(self):
252 252 self.count += 1
253 253 self.usages += 1
254 254 return self
255 255
256 256 def release(self):
257 257 if self.count > 0:
258 258 self.usages -= 1
259 259 # if the transaction scopes are left without being closed, fail
260 260 if self.count > 0 and self.usages == 0:
261 261 self._abort()
262 262
263 263 def running(self):
264 264 return self.count > 0
265 265
266 266 @active
267 267 def close(self):
268 268 '''commit the transaction'''
+ 269 if self.count == 1 and self.onclose is not None:
269 270 self._generatefiles()
- 270 if self.count == 1 and self.onclose is not None:
271 271 self.onclose()
272 272
273 273 self.count -= 1
274 274 if self.count != 0:
275 275 return
276 276 self.file.close()
277 277 self.backupsfile.close()
278 278 self.entries = []
279 279 if self.after:
280 280 self.after()
281 281 if self.opener.isfile(self.journal):
282 282 self.opener.unlink(self.journal)
283 283 if self.opener.isfile(self.backupjournal):
284 284 self.opener.unlink(self.backupjournal)
285 285 for _f, b, _ignore in self.backupentries:
286 286 self.opener.unlink(b)
287 287 self.backupentries = []
288 288 self.journal = None
289 289
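The change in close() above concerns nested transaction scopes: nest() only bumps the reference count, so file generation should happen once, when the outermost scope is actually closed. A sketch of the sequence with a hypothetical caller:

# Illustrative sketch only -- not part of transaction.py.
def nestedscopes(tr):
    inner = tr.nest()   # count: 1 -> 2 (nest() returns the same object)
    # ... more work inside the nested scope ...
    inner.close()       # count: 2 -> 1; guard fails, nothing generated yet
    tr.close()          # count: 1 -> 0; _generatefiles() and onclose()
                        # run (when onclose is set), then the journal
                        # files are removed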
290 290 @active
291 291 def abort(self):
292 292 '''abort the transaction (generally called on error, or when the
293 293 transaction is not explicitly committed before going out of
294 294 scope)'''
295 295 self._abort()
296 296
297 297 def _abort(self):
298 298 self.count = 0
299 299 self.usages = 0
300 300 self.file.close()
301 301 self.backupsfile.close()
302 302
303 303 if self.onabort is not None:
304 304 self.onabort()
305 305
306 306 try:
307 307 if not self.entries and not self.backupentries:
308 308 if self.journal:
309 309 self.opener.unlink(self.journal)
310 310 if self.backupjournal:
311 311 self.opener.unlink(self.backupjournal)
312 312 return
313 313
314 314 self.report(_("transaction abort!\n"))
315 315
316 316 try:
317 317 _playback(self.journal, self.report, self.opener,
318 318 self.entries, self.backupentries, False)
319 319 self.report(_("rollback completed\n"))
320 320 except Exception:
321 321 self.report(_("rollback failed - please run hg recover\n"))
322 322 finally:
323 323 self.journal = None
324 324
325 325
326 326 def rollback(opener, file, report):
327 327 """Rolls back the transaction contained in the given file
328 328
329 329 Reads the entries in the specified file, and the corresponding
330 330 '*.backupfiles' file, to recover from an incomplete transaction.
331 331
332 332 * `file`: a file containing a list of entries, specifying where
333 333 to truncate each file. The file should contain a list of
334 334 file\0offset pairs, delimited by newlines. The corresponding
335 335 '*.backupfiles' file should contain a list of file\0backupfile
336 336 pairs, delimited by \0.
337 337 """
338 338 entries = []
339 339 backupentries = []
340 340
341 341 fp = opener.open(file)
342 342 lines = fp.readlines()
343 343 fp.close()
344 344 for l in lines:
345 345 try:
346 346 f, o = l.split('\0')
347 347 entries.append((f, int(o), None))
348 348 except ValueError:
349 349 report(_("couldn't read journal entry %r!\n") % l)
350 350
351 351 backupjournal = "%s.backupfiles" % file
352 352 if opener.exists(backupjournal):
353 353 fp = opener.open(backupjournal)
354 354 lines = fp.readlines()
355 355 if lines:
356 356 ver = lines[0][:-1]
357 357 if ver == str(version):
358 358 for line in lines[1:]:
359 359 if line:
360 360 # Shave off the trailing newline
361 361 line = line[:-1]
362 362 f, b = line.split('\0')
363 363 backupentries.append((f, b, None))
364 364 else:
365 365 report(_("journal was created by a newer version of "
366 366 "Mercurial"))
367 367
368 368 _playback(file, report, opener, entries, backupentries)
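For reference, a sketch of the on-disk format rollback() expects, with hypothetical entries (shown as Python comments; '\x00' stands for the NUL separator):

# Illustrative sketch only -- hypothetical journal contents.
#
# 'journal': one "file\0offset" pair per line:
#     data/somefile.i\x0012345
#     00changelog.i\x006789
#
# 'journal.backupfiles': the format version, then "file\0backupfile" pairs:
#     1
#     somestatefile\x00journal.backup.somestatefile
#
# rollback(opener, 'journal', report) truncates data/somefile.i to 12345
# and 00changelog.i to 6789, copies journal.backup.somestatefile back over
# somestatefile, and finally removes the journal, the '.backupfiles' file
# and the backup copies.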