transaction: write pending generated files...
Pierre-Yves David - r23358:1b51d1b0 default
@@ -1,508 +1,509
1 # transaction.py - simple journaling scheme for mercurial
2 #
3 # This transaction scheme is intended to gracefully handle program
4 # errors and interruptions. More serious failures like system crashes
5 # can be recovered with an fsck-like tool. As the whole repository is
6 # effectively log-structured, this should amount to simply truncating
7 # anything that isn't referenced in the changelog.
8 #
9 # Copyright 2005, 2006 Matt Mackall <mpm@selenic.com>
10 #
11 # This software may be used and distributed according to the terms of the
12 # GNU General Public License version 2 or any later version.
13
14 from i18n import _
15 import os
16 import errno
17 import error, util
18
19 version = 2
20
21 def active(func):
22     def _active(self, *args, **kwds):
23         if self.count == 0:
24             raise error.Abort(_(
25                 'cannot use transaction when it is already committed/aborted'))
26         return func(self, *args, **kwds)
27     return _active
28
29 def _playback(journal, report, opener, vfsmap, entries, backupentries,
30               unlink=True):
31     for f, o, _ignore in entries:
32         if o or not unlink:
33             try:
34                 fp = opener(f, 'a')
35                 fp.truncate(o)
36                 fp.close()
37             except IOError:
38                 report(_("failed to truncate %s\n") % f)
39                 raise
40         else:
41             try:
42                 opener.unlink(f)
43             except (IOError, OSError), inst:
44                 if inst.errno != errno.ENOENT:
45                     raise
46
47     backupfiles = []
48     for l, f, b, c in backupentries:
49         if l not in vfsmap and c:
50             report("couldn't handle %s: unknown cache location %s\n"
51                    % (b, l))
52         vfs = vfsmap[l]
53         try:
54             if f and b:
55                 filepath = vfs.join(f)
56                 backuppath = vfs.join(b)
57                 try:
58                     util.copyfile(backuppath, filepath)
59                     backupfiles.append(b)
60                 except IOError:
61                     report(_("failed to recover %s\n") % f)
62             else:
63                 target = f or b
64                 try:
65                     vfs.unlink(target)
66                 except (IOError, OSError), inst:
67                     if inst.errno != errno.ENOENT:
68                         raise
69         except (IOError, OSError, util.Abort), inst:
70             if not c:
71                 raise
72
73     opener.unlink(journal)
74     backuppath = "%s.backupfiles" % journal
75     if opener.exists(backuppath):
76         opener.unlink(backuppath)
77     try:
78         for f in backupfiles:
79             if opener.exists(f):
80                 opener.unlink(f)
81     except (IOError, OSError, util.Abort), inst:
82         # only pure backup files remain, it is safe to ignore any error
83         pass
84
85 class transaction(object):
86     def __init__(self, report, opener, vfsmap, journal, after=None,
87                  createmode=None, onclose=None, onabort=None):
88         """Begin a new transaction
89
90         Begins a new transaction that allows rolling back writes in the event of
91         an exception.
92
93         * `after`: called after the transaction has been committed
94         * `createmode`: the mode of the journal file that will be created
95         * `onclose`: called as the transaction is closing, but before it is
96           closed
97         * `onabort`: called as the transaction is aborting, but before any files
98           have been truncated
99         """
100         self.count = 1
101         self.usages = 1
102         self.report = report
103         # a vfs to the store content
104         self.opener = opener
105         # a map to access files in various locations {location -> vfs}
106         vfsmap = vfsmap.copy()
107         vfsmap[''] = opener  # set default value
108         self._vfsmap = vfsmap
109         self.after = after
110         self.onclose = onclose
111         self.onabort = onabort
112         self.entries = []
113         self.map = {}
114         self.journal = journal
115         self._queue = []
116         # a dict of arguments to be passed to hooks
117         self.hookargs = {}
118         self.file = opener.open(self.journal, "w")
119
120         # a list of ('location', 'path', 'backuppath', cache) entries.
121         # - if 'backuppath' is empty, no file existed at backup time
122         # - if 'path' is empty, this is a temporary transaction file
123         # - if 'location' is not empty, the path is outside main opener reach.
124         #   use 'location' value as a key in a vfsmap to find the right 'vfs'
125         # (cache is currently unused)
126         self._backupentries = []
127         self._backupmap = {}
128         self._backupjournal = "%s.backupfiles" % journal
129         self._backupsfile = opener.open(self._backupjournal, 'w')
130         self._backupsfile.write('%d\n' % version)
131
132         if createmode is not None:
133             opener.chmod(self.journal, createmode & 0666)
134             opener.chmod(self._backupjournal, createmode & 0666)
135
136         # hold file generations to be performed on commit
137         self._filegenerators = {}
138         # hold callback to write pending data for hooks
139         self._pendingcallback = {}
140         # True if any pending data have been written ever
141         self._anypending = False
142         # holds callback to call when writing the transaction
143         self._finalizecallback = {}
144         # hold callback for post transaction close
145         self._postclosecallback = {}
146
147     def __del__(self):
148         if self.journal:
149             self._abort()
150
151     @active
152     def startgroup(self):
153         """delay registration of file entry
154
155         This is used by strip to delay vision of strip offset. The transaction
156         sees either none or all of the strip actions to be done."""
157         self._queue.append([])
158
159     @active
160     def endgroup(self):
161         """apply delayed registration of file entry.
162
163         This is used by strip to delay vision of strip offset. The transaction
164         sees either none or all of the strip actions to be done."""
165         q = self._queue.pop()
166         for f, o, data in q:
167             self._addentry(f, o, data)
168
169     @active
170     def add(self, file, offset, data=None):
171         """record the state of an append-only file before update"""
172         if file in self.map or file in self._backupmap:
173             return
174         if self._queue:
175             self._queue[-1].append((file, offset, data))
176             return
177
178         self._addentry(file, offset, data)
179
180     def _addentry(self, file, offset, data):
181         """add an append-only entry to memory and on-disk state"""
182         if file in self.map or file in self._backupmap:
183             return
184         self.entries.append((file, offset, data))
185         self.map[file] = len(self.entries) - 1
186         # add enough data to the journal to do the truncate
187         self.file.write("%s\0%d\n" % (file, offset))
188         self.file.flush()
189
190     @active
191     def addbackup(self, file, hardlink=True, location=''):
192         """Adds a backup of the file to the transaction
193
194         Calling addbackup() creates a hardlink backup of the specified file
195         that is used to recover the file in the event of the transaction
196         aborting.
197
198         * `file`: the file path, relative to .hg/store
199         * `hardlink`: use a hardlink to quickly create the backup
200         """
201         if self._queue:
202             msg = 'cannot use transaction.addbackup inside "group"'
203             raise RuntimeError(msg)
204
205         if file in self.map or file in self._backupmap:
206             return
207         dirname, filename = os.path.split(file)
208         backupfilename = "%s.backup.%s" % (self.journal, filename)
209         backupfile = os.path.join(dirname, backupfilename)
210         vfs = self._vfsmap[location]
211         if vfs.exists(file):
212             filepath = vfs.join(file)
213             backuppath = vfs.join(backupfile)
214             util.copyfiles(filepath, backuppath, hardlink=hardlink)
215         else:
216             backupfile = ''
217
218         self._addbackupentry((location, file, backupfile, False))
219
220     def _addbackupentry(self, entry):
221         """register a new backup entry and write it to disk"""
222         self._backupentries.append(entry)
223         self._backupmap[file] = len(self._backupentries) - 1
224         self._backupsfile.write("%s\0%s\0%s\0%d\n" % entry)
225         self._backupsfile.flush()
226
227     @active
228     def registertmp(self, tmpfile, location=''):
229         """register a temporary transaction file
230
231         Such files will be deleted when the transaction exits (on both
232         failure and success).
233         """
234         self._addbackupentry((location, '', tmpfile, False))
235
236     @active
237     def addfilegenerator(self, genid, filenames, genfunc, order=0,
238                          location=''):
239         """add a function to generate some files at transaction commit
240
241         The `genfunc` argument is a function capable of generating proper
242         content of each entry in the `filenames` tuple.
243
244         At transaction close time, `genfunc` will be called with one file
245         object argument per entry in `filenames`.
246
247         The transaction itself is responsible for the backup, creation and
248         final write of such files.
249
250         The `genid` argument is used to ensure the same set of files is only
251         generated once. A call to `addfilegenerator` for a `genid` already
252         present will overwrite the old entry.
253
254         The `order` argument may be used to control the order in which multiple
255         generators will be executed.
256
257         The `location` argument may be used to indicate the files are located
258         outside of the standard directory for transactions. It should match
259         one of the keys of the `transaction.vfsmap` dictionary.
260         """
261         # For now, we are unable to do proper backup and restore of custom vfs
262         # but for bookmarks that are handled outside this mechanism.
263         self._filegenerators[genid] = (order, filenames, genfunc, location)
264
265     def _generatefiles(self, suffix=''):
266         # write files registered for generation
267         any = False
268         for entry in sorted(self._filegenerators.values()):
269             any = True
270             order, filenames, genfunc, location = entry
271             vfs = self._vfsmap[location]
272             files = []
273             try:
274                 for name in filenames:
275                     name += suffix
276                     if suffix:
277                         self.registertmp(name, location=location)
278                     else:
279                         self.addbackup(name, location=location)
280                     files.append(vfs(name, 'w', atomictemp=True))
281                 genfunc(*files)
282             finally:
283                 for f in files:
284                     f.close()
285         return any
286
287     @active
288     def find(self, file):
289         if file in self.map:
290             return self.entries[self.map[file]]
291         if file in self._backupmap:
292             return self._backupentries[self._backupmap[file]]
293         return None
294
295     @active
296     def replace(self, file, offset, data=None):
297         '''
298         replace can only replace already committed entries
299         that are not pending in the queue
300         '''
301
302         if file not in self.map:
303             raise KeyError(file)
304         index = self.map[file]
305         self.entries[index] = (file, offset, data)
306         self.file.write("%s\0%d\n" % (file, offset))
307         self.file.flush()
308
309     @active
310     def nest(self):
311         self.count += 1
312         self.usages += 1
313         return self
314
315     def release(self):
316         if self.count > 0:
317             self.usages -= 1
318         # if the transaction scopes are left without being closed, fail
319         if self.count > 0 and self.usages == 0:
320             self._abort()
321
322     def running(self):
323         return self.count > 0
324
325     def addpending(self, category, callback):
326         """add a callback to be called when the transaction is pending
327
328         The transaction will be given as callback's first argument.
329
330         Category is a unique identifier to allow overwriting an old callback
331         with a newer callback.
332         """
333         self._pendingcallback[category] = callback
334
335     @active
336     def writepending(self):
337         '''write pending file to temporary version
338
339         This is used to allow hooks to view a transaction before commit'''
340         categories = sorted(self._pendingcallback)
341         for cat in categories:
342             # remove callback since the data will have been flushed
343             any = self._pendingcallback.pop(cat)(self)
344             self._anypending = self._anypending or any
345         self._anypending |= self._generatefiles(suffix='.pending')
346         return self._anypending
347
348     @active
349     def addfinalize(self, category, callback):
350         """add a callback to be called when the transaction is closed
351
352         The transaction will be given as callback's first argument.
353
354         Category is a unique identifier to allow overwriting old callbacks with
355         newer callbacks.
356         """
357         self._finalizecallback[category] = callback
358
359     @active
360     def addpostclose(self, category, callback):
361         """add a callback to be called after the transaction is closed
362
363         The transaction will be given as callback's first argument.
364
365         Category is a unique identifier to allow overwriting an old callback
366         with a newer callback.
367         """
368         self._postclosecallback[category] = callback
369
370     @active
371     def close(self):
372         '''commit the transaction'''
373         if self.count == 1:
374             self._generatefiles()
375             categories = sorted(self._finalizecallback)
376             for cat in categories:
377                 self._finalizecallback[cat](self)
378             if self.onclose is not None:
379                 self.onclose()
380
381         self.count -= 1
382         if self.count != 0:
383             return
384         self.file.close()
385         self._backupsfile.close()
386         # cleanup temporary files
387         for l, f, b, c in self._backupentries:
388             if l not in self._vfsmap and c:
389                 self.report("couldn't remove %s: unknown cache location %s\n"
390                             % (b, l))
391                 continue
392             vfs = self._vfsmap[l]
393             if not f and b and vfs.exists(b):
394                 try:
395                     vfs.unlink(b)
396                 except (IOError, OSError, util.Abort), inst:
397                     if not c:
398                         raise
399                     # Abort may be raised by a read-only opener
400                     self.report("couldn't remove %s: %s\n"
401                                 % (vfs.join(b), inst))
402         self.entries = []
403         if self.after:
404             self.after()
405         if self.opener.isfile(self.journal):
406             self.opener.unlink(self.journal)
407         if self.opener.isfile(self._backupjournal):
408             self.opener.unlink(self._backupjournal)
409         for _l, _f, b, c in self._backupentries:
410             if l not in self._vfsmap and c:
411                 self.report("couldn't remove %s: unknown cache location"
412                             "%s\n" % (b, l))
413                 continue
414             vfs = self._vfsmap[l]
415             if b and vfs.exists(b):
416                 try:
417                     vfs.unlink(b)
418                 except (IOError, OSError, util.Abort), inst:
419                     if not c:
420                         raise
421                     # Abort may be raised by a read-only opener
422                     self.report("couldn't remove %s: %s\n"
423                                 % (vfs.join(b), inst))
424         self._backupentries = []
425         self.journal = None
426         # run post close action
427         categories = sorted(self._postclosecallback)
428         for cat in categories:
429             self._postclosecallback[cat](self)
430
431     @active
432     def abort(self):
433         '''abort the transaction (generally called on error, or when the
434         transaction is not explicitly committed before going out of
435         scope)'''
436         self._abort()
437
438     def _abort(self):
439         self.count = 0
440         self.usages = 0
441         self.file.close()
442         self._backupsfile.close()
443
444         if self.onabort is not None:
445             self.onabort()
446
447         try:
448             if not self.entries and not self._backupentries:
449                 if self.journal:
450                     self.opener.unlink(self.journal)
451                 if self._backupjournal:
452                     self.opener.unlink(self._backupjournal)
453                 return
454
455             self.report(_("transaction abort!\n"))
456
457             try:
458                 _playback(self.journal, self.report, self.opener, self._vfsmap,
459                           self.entries, self._backupentries, False)
460                 self.report(_("rollback completed\n"))
461             except Exception:
462                 self.report(_("rollback failed - please run hg recover\n"))
463         finally:
464             self.journal = None
465
466
467 def rollback(opener, vfsmap, file, report):
468     """Rolls back the transaction contained in the given file
469
470     Reads the entries in the specified file, and the corresponding
471     '*.backupfiles' file, to recover from an incomplete transaction.
472
473     * `file`: a file containing a list of entries, specifying where
474       to truncate each file. The file should contain a list of
475       file\0offset pairs, delimited by newlines. The corresponding
476       '*.backupfiles' file should contain a list of file\0backupfile
477       pairs, delimited by \0.
478     """
479     entries = []
480     backupentries = []
481
482     fp = opener.open(file)
483     lines = fp.readlines()
484     fp.close()
485     for l in lines:
486         try:
487             f, o = l.split('\0')
488             entries.append((f, int(o), None))
489         except ValueError:
490             report(_("couldn't read journal entry %r!\n") % l)
491
492     backupjournal = "%s.backupfiles" % file
493     if opener.exists(backupjournal):
494         fp = opener.open(backupjournal)
495         lines = fp.readlines()
496         if lines:
497             ver = lines[0][:-1]
498             if ver == str(version):
499                 for line in lines[1:]:
500                     if line:
501                         # Shave off the trailing newline
502                         line = line[:-1]
503                         l, f, b, c = line.split('\0')
504                         backupentries.append((l, f, b, bool(c)))
505             else:
506                 report(_("journal was created by a different version of "
507                          "Mercurial"))
508
509     _playback(file, report, opener, vfsmap, entries, backupentries)
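The single line added above (new line 345) makes writepending() also run the registered file generators with a '.pending' suffix, so hooks fired before the transaction is closed can see the generated files as well as the data written through pending callbacks. Below is a minimal, hypothetical usage sketch (Python 2, matching the module above); the simplevfs helper, the 'mydata' generator and the file names are invented for illustration, and only transaction(), addfilegenerator(), writepending() and close() come from transaction.py.

import os
from mercurial.transaction import transaction  # the module shown above

class simplevfs(object):
    """Bare-bones stand-in for a Mercurial vfs (illustration only)."""
    def __init__(self, base):
        self.base = base
        if not os.path.isdir(base):
            os.mkdir(base)
    def join(self, path):
        return os.path.join(self.base, path)
    def __call__(self, path, mode='r', atomictemp=False):
        # atomictemp is accepted but ignored in this sketch
        return open(self.join(path), mode)
    open = __call__
    def exists(self, path):
        return os.path.exists(self.join(path))
    def isfile(self, path):
        return os.path.isfile(self.join(path))
    def unlink(self, path):
        os.unlink(self.join(path))

def genmydata(fp):
    # called with one open file object per name in the 'filenames' tuple
    fp.write('some generated content\n')

vfs = simplevfs('store')
tr = transaction(report=lambda msg: None, opener=vfs, vfsmap={'': vfs},
                 journal='journal')
tr.addfilegenerator('mydata', ('mydata',), genmydata)

tr.writepending()   # with this change, 'store/mydata.pending' is now written
tr.close()          # final 'store/mydata' is written and the .pending copy removed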
@@ -1,482 +1,499
1 Test exchange of common information using bundle2
2
3
4 $ getmainid() {
5 > hg -R main log --template '{node}\n' --rev "$1"
6 > }
7
8 enable obsolescence
9
10 $ cat >> $HGRCPATH << EOF
11 > [experimental]
12 > evolution=createmarkers,exchange
13 > bundle2-exp=True
14 > [ui]
15 > ssh=python "$TESTDIR/dummyssh"
16 > logtemplate={rev}:{node|short} {phase} {author} {bookmarks} {desc|firstline}
17 > [web]
18 > push_ssl = false
19 > allow_push = *
20 > [phases]
21 > publish=False
22 > [hooks]
23 > changegroup = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" changegroup"
24 > b2x-transactionclose = sh -c "HG_LOCAL= python \"$TESTDIR/printenv.py\" b2x-transactionclose"
25 > EOF
26
27 The extension requires a repo (currently unused)
28
29 $ hg init main
30 $ cd main
31 $ touch a
32 $ hg add a
33 $ hg commit -m 'a'
34
35 $ hg unbundle $TESTDIR/bundles/rebase.hg
36 adding changesets
37 adding manifests
38 adding file changes
39 added 8 changesets with 7 changes to 7 files (+3 heads)
40 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=unbundle HG_URL=bundle:*/rebase.hg (glob)
41 (run 'hg heads' to see heads, 'hg merge' to merge)
42
43 $ cd ..
44
45 Real world exchange
46 =====================
47
48 Add more obsolescence information
49
50 $ hg -R main debugobsolete -d '0 0' 1111111111111111111111111111111111111111 `getmainid 9520eea781bc`
51 $ hg -R main debugobsolete -d '0 0' 2222222222222222222222222222222222222222 `getmainid 24b6387c8c8c`
52
53 clone --pull
54
55 $ hg -R main phase --public cd010b8cd998
56 $ hg clone main other --pull --rev 9520eea781bc
57 adding changesets
58 adding manifests
59 adding file changes
60 added 2 changesets with 2 changes to 2 files
61 1 new obsolescence markers
62 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
63 changegroup hook: HG_NODE=cd010b8cd998f3981a5a8115f94f8da4ab506089 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
64 updating to branch default
65 2 files updated, 0 files merged, 0 files removed, 0 files unresolved
66 $ hg -R other log -G
67 @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
68 |
69 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
70
71 $ hg -R other debugobsolete
72 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
73
74 pull
75
76 $ hg -R main phase --public 9520eea781bc
77 $ hg -R other pull -r 24b6387c8c8c
78 pulling from $TESTTMP/main (glob)
79 searching for changes
80 adding changesets
81 adding manifests
82 adding file changes
83 added 1 changesets with 1 changes to 1 files (+1 heads)
84 1 new obsolescence markers
85 b2x-transactionclose hook: HG_NEW_OBSMARKERS=1 HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
86 changegroup hook: HG_NODE=24b6387c8c8cae37178880f3fa95ded3cb1cf785 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
87 (run 'hg heads' to see heads, 'hg merge' to merge)
88 $ hg -R other log -G
89 o 2:24b6387c8c8c draft Nicolas Dumazet <nicdumz.commits@gmail.com> F
90 |
91 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
92 |/
93 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
94
95 $ hg -R other debugobsolete
96 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
97 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
98
99 pull empty (with phase movement)
100
101 $ hg -R main phase --public 24b6387c8c8c
102 $ hg -R other pull -r 24b6387c8c8c
103 pulling from $TESTTMP/main (glob)
104 no changes found
105 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
106 $ hg -R other log -G
107 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
108 |
109 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
110 |/
111 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
112
113 $ hg -R other debugobsolete
114 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
115 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
116
117 pull empty
118
119 $ hg -R other pull -r 24b6387c8c8c
120 pulling from $TESTTMP/main (glob)
121 no changes found
122 b2x-transactionclose hook: HG_NEW_OBSMARKERS=0 HG_SOURCE=pull HG_URL=file:$TESTTMP/main
123 $ hg -R other log -G
124 o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
125 |
126 | @ 1:9520eea781bc draft Nicolas Dumazet <nicdumz.commits@gmail.com> E
127 |/
128 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
129
130 $ hg -R other debugobsolete
131 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
132 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
133
134 add extra data to test their exchange during push
135
136 $ hg -R main bookmark --rev eea13746799a book_eea1
137 $ hg -R main debugobsolete -d '0 0' 3333333333333333333333333333333333333333 `getmainid eea13746799a`
138 $ hg -R main bookmark --rev 02de42196ebe book_02de
139 $ hg -R main debugobsolete -d '0 0' 4444444444444444444444444444444444444444 `getmainid 02de42196ebe`
140 $ hg -R main bookmark --rev 42ccdea3bb16 book_42cc
141 $ hg -R main debugobsolete -d '0 0' 5555555555555555555555555555555555555555 `getmainid 42ccdea3bb16`
142 $ hg -R main bookmark --rev 5fddd98957c8 book_5fdd
143 $ hg -R main debugobsolete -d '0 0' 6666666666666666666666666666666666666666 `getmainid 5fddd98957c8`
144 $ hg -R main bookmark --rev 32af7686d403 book_32af
145 $ hg -R main debugobsolete -d '0 0' 7777777777777777777777777777777777777777 `getmainid 32af7686d403`
146
147 $ hg -R other bookmark --rev cd010b8cd998 book_eea1
148 $ hg -R other bookmark --rev cd010b8cd998 book_02de
149 $ hg -R other bookmark --rev cd010b8cd998 book_42cc
150 $ hg -R other bookmark --rev cd010b8cd998 book_5fdd
151 $ hg -R other bookmark --rev cd010b8cd998 book_32af
152
153 $ hg -R main phase --public eea13746799a
154
155 push
156 $ hg -R main push other --rev eea13746799a --bookmark book_eea1
157 pushing to other
158 searching for changes
159 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_PHASES_MOVED=1 HG_SOURCE=push HG_URL=push
160 changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=eea13746799a9e0bfd88f29d3c2e9dc9389f524f HG_SOURCE=push HG_URL=push
161 remote: adding changesets
162 remote: adding manifests
163 remote: adding file changes
164 remote: added 1 changesets with 0 changes to 0 files (-1 heads)
165 remote: 1 new obsolescence markers
166 updating bookmark book_eea1
167 $ hg -R other log -G
168 o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
169 |\
170 | o 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
171 | |
172 @ | 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
173 |/
174 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de book_32af book_42cc book_5fdd A
175
176 $ hg -R other debugobsolete
177 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
178 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
179 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
180
181 pull over ssh
182
183 $ hg -R other pull ssh://user@dummy/main -r 02de42196ebe --bookmark book_02de
184 pulling from ssh://user@dummy/main
185 searching for changes
186 adding changesets
187 adding manifests
188 adding file changes
189 added 1 changesets with 1 changes to 1 files (+1 heads)
190 1 new obsolescence markers
191 updating bookmark book_02de
192 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
193 changegroup hook: HG_NODE=02de42196ebee42ef284b6780a87cdc96e8eaab6 HG_SOURCE=pull HG_URL=ssh://user@dummy/main
194 (run 'hg heads' to see heads, 'hg merge' to merge)
195 $ hg -R other debugobsolete
196 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
197 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
198 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
199 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
200
201 pull over http
202
203 $ hg -R main serve -p $HGPORT -d --pid-file=main.pid -E main-error.log
204 $ cat main.pid >> $DAEMON_PIDS
205
206 $ hg -R other pull http://localhost:$HGPORT/ -r 42ccdea3bb16 --bookmark book_42cc
207 pulling from http://localhost:$HGPORT/
208 searching for changes
209 adding changesets
210 adding manifests
211 adding file changes
212 added 1 changesets with 1 changes to 1 files (+1 heads)
213 1 new obsolescence markers
214 updating bookmark book_42cc
215 b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_NEW_OBSMARKERS=1 HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_PHASES_MOVED=1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
216 changegroup hook: HG_NODE=42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 HG_SOURCE=pull HG_URL=http://localhost:$HGPORT/
217 (run 'hg heads .' to see heads, 'hg merge' to merge)
218 $ cat main-error.log
219 $ hg -R other debugobsolete
220 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
221 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
222 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
223 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
224 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
225
226 push over ssh
227
228 $ hg -R main push ssh://user@dummy/other -r 5fddd98957c8 --bookmark book_5fdd
229 pushing to ssh://user@dummy/other
230 searching for changes
231 remote: adding changesets
232 remote: adding manifests
233 remote: adding file changes
234 remote: added 1 changesets with 1 changes to 1 files
235 remote: 1 new obsolescence markers
236 updating bookmark book_5fdd
237 remote: b2x-transactionclose hook: HG_BOOKMARK_MOVED=1 HG_BUNDLE2-EXP=1 HG_NEW_OBSMARKERS=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
238 remote: changegroup hook: HG_BUNDLE2-EXP=1 HG_NODE=5fddd98957c8a54a4d436dfe1da9d87f21a1b97b HG_SOURCE=serve HG_URL=remote:ssh:127.0.0.1
239 $ hg -R other log -G
240 o 6:5fddd98957c8 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
241 |
242 o 5:42ccdea3bb16 draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
243 |
244 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
245 | |
246 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
247 | |/|
248 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
249 |/ /
250 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
251 |/
252 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af A
253
254 $ hg -R other debugobsolete
255 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
256 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
257 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
258 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
259 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
260 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
261
262 push over http
263
264 $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
265 $ cat other.pid >> $DAEMON_PIDS
266
267 $ hg -R main phase --public 32af7686d403
268 $ hg -R main push http://localhost:$HGPORT2/ -r 32af7686d403 --bookmark book_32af
269 pushing to http://localhost:$HGPORT2/
270 searching for changes
271 remote: adding changesets
272 remote: adding manifests
273 remote: adding file changes
274 remote: added 1 changesets with 1 changes to 1 files
275 remote: 1 new obsolescence markers
276 updating bookmark book_32af
277 $ cat other-error.log
278
279 Check final content.
280
281 $ hg -R other log -G
282 o 7:32af7686d403 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_32af D
283 |
284 o 6:5fddd98957c8 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_5fdd C
285 |
286 o 5:42ccdea3bb16 public Nicolas Dumazet <nicdumz.commits@gmail.com> book_42cc B
287 |
288 | o 4:02de42196ebe draft Nicolas Dumazet <nicdumz.commits@gmail.com> book_02de H
289 | |
290 | | o 3:eea13746799a public Nicolas Dumazet <nicdumz.commits@gmail.com> book_eea1 G
291 | |/|
292 | o | 2:24b6387c8c8c public Nicolas Dumazet <nicdumz.commits@gmail.com> F
293 |/ /
294 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
294 | @ 1:9520eea781bc public Nicolas Dumazet <nicdumz.commits@gmail.com> E
295 |/
295 |/
296 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
296 o 0:cd010b8cd998 public Nicolas Dumazet <nicdumz.commits@gmail.com> A
297
297
298 $ hg -R other debugobsolete
298 $ hg -R other debugobsolete
299 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
299 1111111111111111111111111111111111111111 9520eea781bcca16c1e15acc0ba14335a0e8e5ba 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
300 2222222222222222222222222222222222222222 24b6387c8c8cae37178880f3fa95ded3cb1cf785 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
301 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
301 3333333333333333333333333333333333333333 eea13746799a9e0bfd88f29d3c2e9dc9389f524f 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
302 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
302 4444444444444444444444444444444444444444 02de42196ebee42ef284b6780a87cdc96e8eaab6 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
303 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
303 5555555555555555555555555555555555555555 42ccdea3bb16d28e1848c95fe2e44c000f3f21b1 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
304 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
304 6666666666666666666666666666666666666666 5fddd98957c8a54a4d436dfe1da9d87f21a1b97b 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
305 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
305 7777777777777777777777777777777777777777 32af7686d403cf45b5d95f2d70cebea587ac806a 0 (Thu Jan 01 00:00:00 1970 +0000) {'user': 'test'}
306
306
(check that no 'pending' files remain)
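(hooks fired while the transaction is still open see the in-progress state through temporary pending copies, e.g. 'bookmarks.pending', of files such as bookmarks, phaseroots and the changelog; the listings below presumably exist to confirm that those temporary copies are removed once the transaction is closed)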

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i

Error Handling
==============

Check that errors are properly returned to the client during push.

Setting up

  $ cat > failpush.py << EOF
  > """A small extension that makes push fail when using bundle2
  >
  > used to test error handling in bundle2
  > """
  >
  > from mercurial import util
  > from mercurial import bundle2
  > from mercurial import exchange
  > from mercurial import extensions
  >
  > def _pushbundle2failpart(pushop, bundler):
  >     reason = pushop.ui.config('failpush', 'reason', None)
  >     part = None
  >     if reason == 'abort':
  >         bundler.newpart('test:abort')
  >     if reason == 'unknown':
  >         bundler.newpart('TEST:UNKNOWN')
  >     if reason == 'race':
  >         # 20 Bytes of crap
  >         bundler.newpart('b2x:check:heads', data='01234567890123456789')
  >
  > @bundle2.parthandler("test:abort")
  > def handleabort(op, part):
  >     raise util.Abort('Abandon ship!', hint="don't panic")
  >
  > def uisetup(ui):
  >     exchange.b2partsgenmapping['failpart'] = _pushbundle2failpart
  >     exchange.b2partsgenorder.insert(0, 'failpart')
  >
  > EOF
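(a rough reading of the extension above: exchange.b2partsgenmapping maps a generator name to a function called with (pushop, bundler) while the client assembles its bundle2 payload, exchange.b2partsgenorder fixes the order those generators run in, and @bundle2.parthandler registers the server-side handler for a part type; the 'failpush.reason' config knob and the part names are specific to this test)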

  $ cd main
  $ hg up tip
  3 files updated, 0 files merged, 1 files removed, 0 files unresolved
  $ echo 'I' > I
  $ hg add I
  $ hg ci -m 'I'
  $ hg id
  e7ec4e813ba6 tip
  $ cd ..

  $ cat << EOF >> $HGRCPATH
  > [extensions]
  > failpush=$TESTTMP/failpush.py
  > EOF

  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
  $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

Doing the actual push: Abort error

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = abort
  > EOF
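(with failpush.reason set to 'abort', the injected 'test:abort' part makes the server-side handler raise util.Abort; the error message and its hint should come back to the client unchanged over the local, ssh and http transports tried below)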

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: Abandon ship!
  (don't panic)
  [255]


Doing the actual push: unknown mandatory parts

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = unknown
  > EOF
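(the part is sent as 'TEST:UNKNOWN'; at this experimental stage of bundle2 an uppercase part name seemingly marks the part as mandatory, so a receiver with no handler for it has to refuse the whole bundle, hence 'missing support for test:unknown' rather than the part being silently skipped)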

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: missing support for test:unknown
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: missing support for test:unknown
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: missing support for test:unknown
  [255]

Doing the actual push: race

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason = race
  > EOF
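(b2x:check:heads carries the heads the client saw on the remote when the push started; the 20 bytes of garbage injected above can never match the server's real heads, so the server reacts as if another push had landed in the meantime: 'repository changed while pushing - please try again')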

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: push failed:
  'repository changed while pushing - please try again'
  [255]

Doing the actual push: hook abort

  $ cat << EOF >> $HGRCPATH
  > [failpush]
  > reason =
  > [hooks]
  > b2x-pretransactionclose.failpush = false
  > EOF
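(the generated part is disabled again by leaving failpush.reason empty, and a b2x-pretransactionclose hook is pointed at the shell command 'false', which always exits with status 1; a failing pre-transaction-close hook vetoes the transaction, so the server rolls back everything it just added, as the 'transaction abort!' / 'rollback completed' lines below show)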

  $ "$TESTDIR/killdaemons.py" $DAEMON_PIDS
  $ hg -R other serve -p $HGPORT2 -d --pid-file=other.pid -E other-error.log
  $ cat other.pid >> $DAEMON_PIDS

  $ hg -R main push other -r e7ec4e813ba6
  pushing to other
  searching for changes
  transaction abort!
  rollback completed
  abort: b2x-pretransactionclose.failpush hook exited with status 1
  [255]

  $ hg -R main push ssh://user@dummy/other -r e7ec4e813ba6
  pushing to ssh://user@dummy/other
  searching for changes
  abort: b2x-pretransactionclose.failpush hook exited with status 1
  remote: transaction abort!
  remote: rollback completed
  [255]

  $ hg -R main push http://localhost:$HGPORT2/ -r e7ec4e813ba6
  pushing to http://localhost:$HGPORT2/
  searching for changes
  abort: b2x-pretransactionclose.failpush hook exited with status 1
  [255]

(check that no 'pending' files remain)
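(same check as above, but this time after pushes whose transactions were vetoed by the hook: the pending copies written for the hook should not survive the rollback either)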

  $ ls -1 other/.hg/bookmarks*
  other/.hg/bookmarks
  $ ls -1 other/.hg/store/phaseroots*
  other/.hg/store/phaseroots
  $ ls -1 other/.hg/store/00changelog.i*
  other/.hg/store/00changelog.i
