##// END OF EJS Templates
transaction: allow generating file outside of store...
Pierre-Yves David -
r22663:4c619873 default
parent child Browse files
Show More
@@ -1,348 +1,356
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
def active(func):
    """Decorator guarding transaction methods.

    The wrapped method may only run while the transaction is live; once it
    has been committed or aborted (``count`` dropped to zero), calling it
    raises ``error.Abort``.
    """
    def _active(self, *args, **kwargs):
        if self.count == 0:
            raise error.Abort(_(
                'cannot use transaction when it is already committed/aborted'))
        return func(self, *args, **kwargs)
    return _active
25 25
26 26 def _playback(journal, report, opener, entries, backupentries, unlink=True):
27 27 for f, o, _ignore in entries:
28 28 if o or not unlink:
29 29 try:
30 30 fp = opener(f, 'a')
31 31 fp.truncate(o)
32 32 fp.close()
33 33 except IOError:
34 34 report(_("failed to truncate %s\n") % f)
35 35 raise
36 36 else:
37 37 try:
38 38 opener.unlink(f)
39 39 except (IOError, OSError), inst:
40 40 if inst.errno != errno.ENOENT:
41 41 raise
42 42
43 43 backupfiles = []
44 44 for f, b, _ignore in backupentries:
45 45 filepath = opener.join(f)
46 46 backuppath = opener.join(b)
47 47 try:
48 48 util.copyfile(backuppath, filepath)
49 49 backupfiles.append(b)
50 50 except IOError:
51 51 report(_("failed to recover %s\n") % f)
52 52 raise
53 53
54 54 opener.unlink(journal)
55 55 backuppath = "%s.backupfiles" % journal
56 56 if opener.exists(backuppath):
57 57 opener.unlink(backuppath)
58 58 for f in backupfiles:
59 59 opener.unlink(f)
60 60
class transaction(object):
    """Simple journaling transaction over files managed by ``opener``.

    Writes are recorded in a journal file so that, on abort, every touched
    file can be truncated back to its pre-transaction length or restored
    from a backup copy (see ``_playback``).
    """

    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `report`: callable used to emit warning/error messages
        * `opener`: vfs-like object used to open, unlink and chmod the
          journal files
        * `journal`: path of the journal file, relative to `opener`
        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        # `count` is the nesting depth (see nest/release); `usages` tracks
        # outstanding users so an unclosed scope triggers _abort
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        # journaled entries: list of (file, offset, data) triples
        self.entries = []
        # backup entries: list of (file, backupfile, data) triples
        self.backupentries = []
        # file name -> index into self.entries
        self.map = {}
        # file name -> index into self.backupentries
        self.backupmap = {}
        self.journal = journal
        # stack of pending ([entries], [backupentries]) while groups are open
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self.backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self.backupsfile = opener.open(self.backupjournal, 'w')
        if createmode is not None:
            opener.chmod(self.journal, createmode & 0666)
            opener.chmod(self.backupjournal, createmode & 0666)

        # hold file generations to be performed on commit
        self._filegenerators = {}

    def __del__(self):
        # a transaction that was never closed is rolled back on collection
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """start queueing journal writes until the matching endgroup()"""
        self._queue.append(([], []))

    @active
    def endgroup(self):
        """flush every entry queued since the matching startgroup()"""
        q = self._queue.pop()
        self.entries.extend(q[0])
        self.backupentries.extend(q[1])

        offsets = []
        backups = []
        for f, o, _data in q[0]:
            offsets.append((f, o))

        for f, b, _data in q[1]:
            backups.append((f, b))

        # persist the queued truncation records...
        d = ''.join(['%s\0%d\n' % (f, o) for f, o in offsets])
        self.file.write(d)
        self.file.flush()

        # ...and the queued backup records
        d = ''.join(['%s\0%s\0' % (f, b) for f, b in backups])
        self.backupsfile.write(d)
        self.backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """record the current size of `file` so abort can truncate it back"""
        if file in self.map or file in self.backupmap:
            return
        if self._queue:
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True, vfs=None):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        * `vfs`: the vfs the file lives on; defaults to the transaction's
          own opener (the store) when None
        """

        if file in self.map or file in self.backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if vfs is None:
            vfs = self.opener
        if vfs.exists(file):
            filepath = vfs.join(file)
            # the backup copy itself always lives next to the journal
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: truncating to 0 on abort is enough
            self.add(file, 0)
            return

        if self._queue:
            self._queue[-1][1].append((file, backupfile))
            return

        self.backupentries.append((file, backupfile, None))
        self.backupmap[file] = len(self.backupentries) - 1
        self.backupsfile.write("%s\0%s\0" % (file, backupfile))
        self.backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0, vfs=None):
        """add a function to generate some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entry in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such files.

        The `genid` argument is used to ensure the same set of files is only
        generated once. A call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generators will be executed.

        The `vfs` argument selects where the files are generated; when None
        the transaction's own opener is used at close time.
        """
        # For now, we are unable to do proper backup and restore of custom vfs
        # but for bookmarks that are handled outside this mechanism.
        assert vfs is None or filenames == ('bookmarks',)
        self._filegenerators[genid] = (order, filenames, genfunc, vfs)

    @active
    def find(self, file):
        """return the journal or backup entry recorded for `file`, or None"""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self.backupmap:
            return self.backupentries[self.backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new offset so playback truncates to it
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """enter a nested transaction scope; returns self"""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """release one usage of the transaction, aborting if it was the
        last user and the transaction was never closed"""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        # True while at least one (possibly nested) scope is still open
        return self.count > 0

    @active
    def close(self):
        '''commit the transaction'''
        # write files registered for generation
        for entry in sorted(self._filegenerators.values()):
            order, filenames, genfunc, vfs = entry
            if vfs is None:
                vfs = self.opener
            files = []
            try:
                for name in filenames:
                    # Some files are already backed up when creating the
                    # localrepo. Until this is properly fixed we disable the
                    # backup for them.
                    if name not in ('phaseroots', 'bookmarks'):
                        self.addbackup(name)
                    files.append(vfs(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

        if self.count == 1 and self.onclose is not None:
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope: defer the real commit
            return
        self.file.close()
        self.backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        # commit succeeded: the journal, its backup record and the backup
        # copies are no longer needed
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self.backupjournal):
            self.opener.unlink(self.backupjournal)
        for _f, b, _ignore in self.backupentries:
            self.opener.unlink(b)
        self.backupentries = []
        self.journal = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        # mark the transaction dead before doing any rollback work
        self.count = 0
        self.usages = 0
        self.file.close()
        self.backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            if not self.entries and not self.backupentries:
                # nothing was journaled: just remove the (empty) journals
                if self.journal:
                    self.opener.unlink(self.journal)
                if self.backupjournal:
                    self.opener.unlink(self.backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self.backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
311 319
312 320
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines. The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        data = fp.read()
        # close the backup journal too (the handle was previously leaked)
        fp.close()
        if len(data) > 0:
            parts = data.split('\0')
            # Entries are written as "file\0backup\0", so splitting on '\0'
            # leaves a trailing empty element; iterate only over complete
            # pairs.  (The previous slice `parts[i:i + 1]` produced a single
            # element and the two-name unpack always raised ValueError.)
            for i in xrange(0, len(parts) - 1, 2):
                f, b = parts[i], parts[i + 1]
                backupentries.append((f, b, None))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now