##// END OF EJS Templates
cleanup: name unused variables using convention of leading _...
Mads Kiilerich -
r22204:f8dc6599 default
parent child Browse files
Show More
@@ -1,344 +1,344
1 1 # transaction.py - simple journaling scheme for mercurial
2 2 #
3 3 # This transaction scheme is intended to gracefully handle program
4 4 # errors and interruptions. More serious failures like system crashes
5 5 # can be recovered with an fsck-like tool. As the whole repository is
6 6 # effectively log-structured, this should amount to simply truncating
7 7 # anything that isn't referenced in the changelog.
8 8 #
9 9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 10 #
11 11 # This software may be used and distributed according to the terms of the
12 12 # GNU General Public License version 2 or any later version.
13 13
14 14 from i18n import _
15 15 import errno
16 16 import error, util
17 17
def active(func):
    """Decorator guarding transaction methods against use after finish.

    The wrapped method refuses to run once the transaction's nesting
    count has dropped to zero (i.e. it was committed or aborted) and
    raises error.Abort instead.
    """
    def _active(self, *args, **kwds):
        if self.count:
            return func(self, *args, **kwds)
        raise error.Abort(_(
            'cannot use transaction when it is already committed/aborted'))
    return _active
25 25
26 26 def _playback(journal, report, opener, entries, backupentries, unlink=True):
27 for f, o, ignore in entries:
27 for f, o, _ignore in entries:
28 28 if o or not unlink:
29 29 try:
30 30 fp = opener(f, 'a')
31 31 fp.truncate(o)
32 32 fp.close()
33 33 except IOError:
34 34 report(_("failed to truncate %s\n") % f)
35 35 raise
36 36 else:
37 37 try:
38 38 opener.unlink(f)
39 39 except (IOError, OSError), inst:
40 40 if inst.errno != errno.ENOENT:
41 41 raise
42 42
43 43 backupfiles = []
44 for f, b, ignore in backupentries:
44 for f, b, _ignore in backupentries:
45 45 filepath = opener.join(f)
46 46 backuppath = opener.join(b)
47 47 try:
48 48 util.copyfile(backuppath, filepath)
49 49 backupfiles.append(b)
50 50 except IOError:
51 51 report(_("failed to recover %s\n") % f)
52 52 raise
53 53
54 54 opener.unlink(journal)
55 55 backuppath = "%s.backupfiles" % journal
56 56 if opener.exists(backuppath):
57 57 opener.unlink(backuppath)
58 58 for f in backupfiles:
59 59 opener.unlink(f)
60 60
class transaction(object):
    """A journal-backed transaction over files managed by `opener`.

    Every write registered through add()/addbackup() is recorded in a
    journal file so that an interrupted or aborted transaction can be
    rolled back by _playback()/rollback().
    """

    def __init__(self, report, opener, journal, after=None, createmode=None,
                 onclose=None, onabort=None):
        """Begin a new transaction

        Begins a new transaction that allows rolling back writes in the event of
        an exception.

        * `after`: called after the transaction has been committed
        * `createmode`: the mode of the journal file that will be created
        * `onclose`: called as the transaction is closing, but before it is
        closed
        * `onabort`: called as the transaction is aborting, but before any files
        have been truncated
        """
        # count tracks close() nesting depth; usages tracks handed-out
        # references (see nest()/release())
        self.count = 1
        self.usages = 1
        self.report = report
        self.opener = opener
        self.after = after
        self.onclose = onclose
        self.onabort = onabort
        # (file, offset, data) truncation records and (file, backupfile,
        # data) backup records, plus name -> index maps for dedup/lookup
        self.entries = []
        self.backupentries = []
        self.map = {}
        self.backupmap = {}
        self.journal = journal
        # stack of pending (entries, backupentries) pairs for open groups
        self._queue = []
        # a dict of arguments to be passed to hooks
        self.hookargs = {}

        self.backupjournal = "%s.backupfiles" % journal
        self.file = opener.open(self.journal, "w")
        self.backupsfile = opener.open(self.backupjournal, 'w')
        if createmode is not None:
            # mask off execute/special bits for the journal files
            opener.chmod(self.journal, createmode & 0o666)
            opener.chmod(self.backupjournal, createmode & 0o666)

        # hold file generations to be performed on commit
        self._filegenerators = {}

    def __del__(self):
        # a journal still set means close() never completed: roll back
        if self.journal:
            self._abort()

    @active
    def startgroup(self):
        """Start buffering add()/addbackup() calls until endgroup()."""
        self._queue.append(([], []))

    @active
    def endgroup(self):
        """Flush the innermost buffered group to the journal files."""
        pending, pendingbackups = self._queue.pop()
        self.entries.extend(pending)
        self.backupentries.extend(pendingbackups)

        self.file.write(''.join(['%s\0%d\n' % (f, o)
                                 for f, o, _data in pending]))
        self.file.flush()

        self.backupsfile.write(''.join(['%s\0%s\0' % (f, b)
                                        for f, b, _data in pendingbackups]))
        self.backupsfile.flush()

    @active
    def add(self, file, offset, data=None):
        """Record the current length of `file` so abort can truncate it."""
        if file in self.map or file in self.backupmap:
            return
        if self._queue:
            # inside a group: journal write is deferred to endgroup()
            self._queue[-1][0].append((file, offset, data))
            return

        self.entries.append((file, offset, data))
        self.map[file] = len(self.entries) - 1
        # add enough data to the journal to do the truncate
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def addbackup(self, file, hardlink=True):
        """Adds a backup of the file to the transaction

        Calling addbackup() creates a hardlink backup of the specified file
        that is used to recover the file in the event of the transaction
        aborting.

        * `file`: the file path, relative to .hg/store
        * `hardlink`: use a hardlink to quickly create the backup
        """

        if file in self.map or file in self.backupmap:
            return
        backupfile = "%s.backup.%s" % (self.journal, file)
        if self.opener.exists(file):
            filepath = self.opener.join(file)
            backuppath = self.opener.join(backupfile)
            util.copyfiles(filepath, backuppath, hardlink=hardlink)
        else:
            # file does not exist yet: a truncate-to-zero record suffices
            self.add(file, 0)
            return

        if self._queue:
            # BUGFIX: queue the same 3-tuple shape endgroup() and close()
            # unpack; the previous 2-tuple raised ValueError on unpacking
            self._queue[-1][1].append((file, backupfile, None))
            return

        self.backupentries.append((file, backupfile, None))
        self.backupmap[file] = len(self.backupentries) - 1
        self.backupsfile.write("%s\0%s\0" % (file, backupfile))
        self.backupsfile.flush()

    @active
    def addfilegenerator(self, genid, filenames, genfunc, order=0):
        """add a function that generates some files at transaction commit

        The `genfunc` argument is a function capable of generating proper
        content of each entry in the `filename` tuple.

        At transaction close time, `genfunc` will be called with one file
        object argument per entries in `filenames`.

        The transaction itself is responsible for the backup, creation and
        final write of such file.

        The `genid` argument is used to ensure the same set of file is only
        generated once. Call to `addfilegenerator` for a `genid` already
        present will overwrite the old entry.

        The `order` argument may be used to control the order in which multiple
        generator will be executed.
        """
        self._filegenerators[genid] = (order, filenames, genfunc)

    @active
    def find(self, file):
        """Return the journalled entry for `file`, or None."""
        if file in self.map:
            return self.entries[self.map[file]]
        if file in self.backupmap:
            return self.backupentries[self.backupmap[file]]
        return None

    @active
    def replace(self, file, offset, data=None):
        '''
        replace can only replace already committed entries
        that are not pending in the queue
        '''

        if file not in self.map:
            raise KeyError(file)
        index = self.map[file]
        self.entries[index] = (file, offset, data)
        # re-journal the new offset; playback uses the last record seen
        self.file.write("%s\0%d\n" % (file, offset))
        self.file.flush()

    @active
    def nest(self):
        """Enter a nested scope; close() must be called once more."""
        self.count += 1
        self.usages += 1
        return self

    def release(self):
        """Drop one usage reference; abort if scopes were left unclosed."""
        if self.count > 0:
            self.usages -= 1
        # if the transaction scopes are left without being closed, fail
        if self.count > 0 and self.usages == 0:
            self._abort()

    def running(self):
        """True while the transaction is neither committed nor aborted."""
        return self.count > 0

    @active
    def close(self):
        '''commit the transaction'''
        # write files registered for generation, honoring their order key
        for _order, filenames, genfunc in sorted(
                self._filegenerators.values()):
            files = []
            try:
                for name in filenames:
                    self.addbackup(name)
                    files.append(self.opener(name, 'w', atomictemp=True))
                genfunc(*files)
            finally:
                for f in files:
                    f.close()

        if self.count == 1 and self.onclose is not None:
            self.onclose()

        self.count -= 1
        if self.count != 0:
            # still inside a nested scope; the outermost close() commits
            return
        self.file.close()
        self.backupsfile.close()
        self.entries = []
        if self.after:
            self.after()
        if self.opener.isfile(self.journal):
            self.opener.unlink(self.journal)
        if self.opener.isfile(self.backupjournal):
            self.opener.unlink(self.backupjournal)
        # backups are no longer needed once the transaction committed
        for _f, b, _ignore in self.backupentries:
            self.opener.unlink(b)
        self.backupentries = []
        self.journal = None

    @active
    def abort(self):
        '''abort the transaction (generally called on error, or when the
        transaction is not explicitly committed before going out of
        scope)'''
        self._abort()

    def _abort(self):
        self.count = 0
        self.usages = 0
        self.file.close()
        self.backupsfile.close()

        if self.onabort is not None:
            self.onabort()

        try:
            # nothing was journalled: just remove the journal files
            if not self.entries and not self.backupentries:
                if self.journal:
                    self.opener.unlink(self.journal)
                if self.backupjournal:
                    self.opener.unlink(self.backupjournal)
                return

            self.report(_("transaction abort!\n"))

            try:
                _playback(self.journal, self.report, self.opener,
                          self.entries, self.backupentries, False)
                self.report(_("rollback completed\n"))
            except Exception:
                # leave recovery to 'hg recover'; never raise out of abort
                self.report(_("rollback failed - please run hg recover\n"))
        finally:
            self.journal = None
307 307
308 308
def rollback(opener, file, report):
    """Rolls back the transaction contained in the given file

    Reads the entries in the specified file, and the corresponding
    '*.backupfiles' file, to recover from an incomplete transaction.

    * `file`: a file containing a list of entries, specifying where
    to truncate each file.  The file should contain a list of
    file\0offset pairs, delimited by newlines.  The corresponding
    '*.backupfiles' file should contain a list of file\0backupfile
    pairs, delimited by \0.
    """
    entries = []
    backupentries = []

    fp = opener.open(file)
    lines = fp.readlines()
    fp.close()
    for l in lines:
        try:
            f, o = l.split('\0')
            # int() tolerates the trailing newline left on the offset
            entries.append((f, int(o), None))
        except ValueError:
            report(_("couldn't read journal entry %r!\n") % l)

    backupjournal = "%s.backupfiles" % file
    if opener.exists(backupjournal):
        fp = opener.open(backupjournal)
        data = fp.read()
        # BUGFIX: close the backup journal instead of leaking the handle
        fp.close()
        if len(data) > 0:
            parts = data.split('\0')
            # records are written as "file\0backup\0", so `parts` ends
            # with a trailing empty string; stop one short so every
            # iteration sees a complete (file, backupfile) pair.
            # BUGFIX: the old `parts[i:i + 1]` slice had length 1 and
            # raised ValueError when unpacked into two names.
            for i in xrange(0, len(parts) - 1, 2):
                f, b = parts[i], parts[i + 1]
                backupentries.append((f, b, None))

    _playback(file, report, opener, entries, backupentries)
General Comments 0
You need to be logged in to leave comments. Login now