localrepo.commit: normalize commit message even for rawcommit...

Alexis S. L. Carvalho
r6254:3667b6e4 default
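This changeset hoists the commit-message clean-up in localrepo.commit out of
the use_dirstate branch, so the message is normalized even when commit() is
entered via rawcommit(), which passes explicit parents and therefore runs
with use_dirstate false. Trailing whitespace is stripped from every line and
leading blank lines are dropped; only the empty-message abort stays limited
to dirstate-backed commits. A minimal sketch of the resulting behaviour (the
normalize helper below is illustrative, not part of the patch):

    def normalize(text):
        # same steps as the new code in commit(): strip trailing
        # whitespace from each line, then drop leading blank lines
        lines = [line.rstrip() for line in text.rstrip().splitlines()]
        while lines and not lines[0]:
            del lines[0]
        return '\n'.join(lines)

    print normalize("\n\nfix bug  \n\ndetails\n")  # "fix bug\n\ndetails"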
@@ -1,2124 +1,2123 @@
# localrepo.py - read/write repository class for mercurial
#
# Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
#
# This software may be used and distributed according to the terms
# of the GNU General Public License, incorporated herein by reference.

from node import bin, hex, nullid, nullrev, short
from i18n import _
import repo, changegroup
import changelog, dirstate, filelog, manifest, context, weakref
import lock, transaction, stat, errno, ui
import os, revlog, time, util, extensions, hook, inspect

class localrepository(repo.repository):
    capabilities = util.set(('lookup', 'changegroupsubset'))
    supported = ('revlogv1', 'store')

    def __init__(self, parentui, path=None, create=0):
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)
        self.wopener = util.opener(self.root)

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise repo.RepoError(_("repository %s not found") % path)
        elif create:
            raise repo.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            try:
                requirements = self.opener("requires").read().splitlines()
            except IOError, inst:
                if inst.errno != errno.ENOENT:
                    raise
                requirements = []
            # check them
            for r in requirements:
                if r not in self.supported:
                    raise repo.RepoError(_("requirement '%s' not supported") % r)

        # setup store
        if "store" in requirements:
            self.encodefn = util.encodefilename
            self.decodefn = util.decodefilename
            self.spath = os.path.join(self.path, "store")
        else:
            self.encodefn = lambda x: x
            self.decodefn = lambda x: x
            self.spath = self.path

        try:
            # files in .hg/ will be created using this mode
            mode = os.stat(self.spath).st_mode
            # avoid some useless chmods
            if (0777 & ~util._umask) == (0777 & mode):
                mode = None
        except OSError:
            mode = None

        self._createmode = mode
        self.opener.createmode = mode
        sopener = util.opener(self.spath)
        sopener.createmode = mode
        self.sopener = util.encodedopener(sopener, self.encodefn)

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            pass

        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None

    def __getattr__(self, name):
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError, name

    def url(self):
        return 'file:' + self.root

    def hook(self, name, throw=False, **args):
        return hook.hook(self.ui, self, name, throw, **args)

    tag_disallowed = ':\r\n'

    def _tag(self, name, node, message, local, user, date, parent=None,
             extra={}):
        use_dirstate = parent is None

        for c in self.tag_disallowed:
            if c in name:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        self.hook('pretag', throw=True, node=hex(node), tag=name, local=local)

        def writetag(fp, name, munge, prevtags):
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            fp.write('%s %s\n' % (hex(node), munge and munge(name) or name))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetag(fp, name, None, prevtags)
            self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except revlog.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetag(fp, name, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode

    def tag(self, name, node, message, local, user, date):
        '''tag a revision with a symbolic name.

        if local is True, the tag is stored in a per-repository file.
        otherwise, it is stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tag in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(name, node, message, local, user, date)

    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.items():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supercedes us OR
                #  mutual supercedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k,nh in globaltags.items():
            n = nh[0]
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()

        return self.tagscache

    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        self.tags()

        return self._tagstypecache.get(tagname)

    def _hgtagsnodes(self):
        heads = self.heads()
        heads.reverse()
        last = {}
        ret = []
        for node in heads:
            c = self.changectx(node)
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except revlog.LookupError:
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]

    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().items():
            try:
                r = self.changelog.rev(n)
            except:
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        l.sort()
        return [(t, n) for r, t, n in l]

    def nodetags(self, node):
        '''return the tags associated with a node'''
        if not self.nodetagscache:
            self.nodetagscache = {}
            for t, n in self.tags().items():
                self.nodetagscache.setdefault(n, []).append(t)
        return self.nodetagscache.get(node, [])

    def _branchtags(self, partial, lrev):
        tiprev = self.changelog.count() - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial

    def branchtags(self):
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.items():
            self.branchcache[util.tolocal(k)] = v
        self._ubranchcache = partial
        return self.branchcache

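    # The two helpers below keep the named-branch cache in .hg/branch.cache.
    # As the read/write code shows, the first line of that file is
    # "<tip-hex> <tip-rev>" and each following line is "<node-hex> <label>";
    # the reader throws the whole cache away when the recorded tip no longer
    # matches the changelog.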
    def _readbranchcache(self):
        partial = {}
        try:
            f = self.opener("branch.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if not (lrev < self.changelog.count() and
                    self.changelog.node(lrev) == last): # sanity check
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial[label.strip()] = bin(node)
        except (KeyboardInterrupt, util.SignalInterrupt):
            raise
        except Exception, inst:
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev

    def _writebranchcache(self, branches, tip, tiprev):
        try:
            f = self.opener("branch.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, node in branches.iteritems():
                f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass

    def _updatebranchcache(self, partial, start, end):
        for r in xrange(start, end):
            c = self.changectx(r)
            b = c.branch()
            partial[b] = c.node()

    def lookup(self, key):
        if key == '.':
            key, second = self.dirstate.parents()
            if key == nullid:
                raise repo.RepoError(_("no revision checked out"))
            if second != nullid:
                self.ui.warn(_("warning: working directory has two parents, "
                               "tag '.' uses the first\n"))
        elif key == 'null':
            return nullid
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise repo.RepoError(_("unknown revision '%s'") % key)

    def dev(self):
        return os.lstat(self.path).st_dev

    def local(self):
        return True

    def join(self, f):
        return os.path.join(self.path, f)

    def sjoin(self, f):
        f = self.encodefn(f)
        return os.path.join(self.spath, f)

    def wjoin(self, f):
        return os.path.join(self.root, f)

    def file(self, f):
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)

    def changectx(self, changeid=None):
        return context.changectx(self, changeid)

    def workingctx(self):
        return context.workingctx(self)

    def parents(self, changeid=None):
        '''
        get list of changectxs for parents of changeid or working directory
        '''
        if changeid is None:
            pl = self.dirstate.parents()
        else:
            n = self.changelog.lookup(changeid)
            pl = self.changelog.parents(n)
        if pl[1] == nullid:
            return [self.changectx(pl[0])]
        return [self.changectx(pl[0]), self.changectx(pl[1])]

    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)

    def getcwd(self):
        return self.dirstate.getcwd()

    def pathto(self, f, cwd=None):
        return self.dirstate.pathto(f, cwd)

    def wfile(self, f, mode='r'):
        return self.wopener(f, mode)

    def _link(self, f):
        return os.path.islink(self.wjoin(f))

    def _filter(self, filter, filename, data):
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data

    def adddatafilter(self, name, filter):
        self._datafilters[name] = filter

    def wread(self, filename):
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)

    def wwrite(self, filename, data, flags):
        data = self._filter("decode", filename, data)
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        self.wopener(filename, 'w').write(data)
        util.set_flags(self.wjoin(filename), flags)

    def wwritedata(self, filename, data):
        return self._filter("decode", filename, data)

    def transaction(self):
        if self._transref and self._transref():
            return self._transref().nest()

        # abort here if the journal already exists
        if os.path.exists(self.sjoin("journal")):
            raise repo.RepoError(_("journal already exists - run hg recover"))

        # save dirstate for rollback
        try:
            ds = self.opener("dirstate").read()
        except IOError:
            ds = ""
        self.opener("journal.dirstate", "w").write(ds)
        self.opener("journal.branch", "w").write(self.dirstate.branch())

        renames = [(self.sjoin("journal"), self.sjoin("undo")),
                   (self.join("journal.dirstate"), self.join("undo.dirstate")),
                   (self.join("journal.branch"), self.join("undo.branch"))]
        tr = transaction.transaction(self.ui.warn, self.sopener,
                                     self.sjoin("journal"),
                                     aftertrans(renames),
                                     self._createmode)
        self._transref = weakref.ref(tr)
        return tr

    def recover(self):
        l = self.lock()
        try:
            if os.path.exists(self.sjoin("journal")):
                self.ui.status(_("rolling back interrupted transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("journal"))
                self.invalidate()
                return True
            else:
                self.ui.warn(_("no interrupted transaction available\n"))
                return False
        finally:
            del l

    def rollback(self):
        wlock = lock = None
        try:
            wlock = self.wlock()
            lock = self.lock()
            if os.path.exists(self.sjoin("undo")):
                self.ui.status(_("rolling back last transaction\n"))
                transaction.rollback(self.sopener, self.sjoin("undo"))
                util.rename(self.join("undo.dirstate"), self.join("dirstate"))
                try:
                    branch = self.opener("undo.branch").read()
                    self.dirstate.setbranch(branch)
                except IOError:
                    self.ui.warn(_("Named branch could not be reset, "
                                   "current branch still is: %s\n")
                                 % util.tolocal(self.dirstate.branch()))
                self.invalidate()
                self.dirstate.invalidate()
            else:
                self.ui.warn(_("no rollback information available\n"))
        finally:
            del lock, wlock

    def invalidate(self):
        for a in "changelog manifest".split():
            if hasattr(self, a):
                self.__delattr__(a)
        self.tagscache = None
        self._tagstypecache = None
        self.nodetagscache = None
        self.branchcache = None
        self._ubranchcache = None
        self._branchcachetip = None

    def _lock(self, lockname, wait, releasefn, acquirefn, desc):
        try:
            l = lock.lock(lockname, 0, releasefn, desc=desc)
        except lock.LockHeld, inst:
            if not wait:
                raise
            self.ui.warn(_("waiting for lock on %s held by %r\n") %
                         (desc, inst.locker))
            # default to 600 seconds timeout
            l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
                          releasefn, desc=desc)
        if acquirefn:
            acquirefn()
        return l

    def lock(self, wait=True):
        if self._lockref and self._lockref():
            return self._lockref()

        l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
                       _('repository %s') % self.origroot)
        self._lockref = weakref.ref(l)
        return l

    def wlock(self, wait=True):
        if self._wlockref and self._wlockref():
            return self._wlockref()

        l = self._lock(self.join("wlock"), wait, self.dirstate.write,
                       self.dirstate.invalidate, _('working directory of %s') %
                       self.origroot)
        self._wlockref = weakref.ref(l)
        return l

    def filecommit(self, fn, manifest1, manifest2, linkrev, tr, changelist):
        """
        commit an individual file as part of a larger transaction
        """

        t = self.wread(fn)
        fl = self.file(fn)
        fp1 = manifest1.get(fn, nullid)
        fp2 = manifest2.get(fn, nullid)

        meta = {}
        cp = self.dirstate.copied(fn)
        if cp:
            # Mark the new revision of this file as a copy of another
            # file. This copy data will effectively act as a parent
            # of this new revision. If this is a merge, the first
            # parent will be the nullid (meaning "look up the copy data")
            # and the second one will be the other parent. For example:
            #
            # 0 --- 1 --- 3   rev1 changes file foo
            #   \       /     rev2 renames foo to bar and changes it
            #    \- 2 -/      rev3 should have bar with all changes and
            #                      should record that bar descends from
            #                      bar in rev2 and foo in rev1
            #
            # this allows this merge to succeed:
            #
            # 0 --- 1 --- 3   rev4 reverts the content change from rev2
            #   \       /     merging rev3 and rev4 should use bar@rev2
            #    \- 2 --- 4   as the merge base
            #
            meta["copy"] = cp
            if not manifest2: # not a branch merge
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
                fp2 = nullid
            elif fp2 != nullid: # copied on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            elif fp1 != nullid: # copied on local side, reversed
                meta["copyrev"] = hex(manifest2.get(cp))
                fp2 = fp1
            elif cp in manifest2: # directory rename on local side
                meta["copyrev"] = hex(manifest2[cp])
            else: # directory rename on remote side
                meta["copyrev"] = hex(manifest1.get(cp, nullid))
            self.ui.debug(_(" %s: copy %s:%s\n") %
                          (fn, cp, meta["copyrev"]))
            fp1 = nullid
        elif fp2 != nullid:
            # is one parent an ancestor of the other?
            fpa = fl.ancestor(fp1, fp2)
            if fpa == fp1:
                fp1, fp2 = fp2, nullid
            elif fpa == fp2:
                fp2 = nullid

        # is the file unmodified from the parent? report existing entry
        if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
            return fp1

        changelist.append(fn)
        return fl.add(t, meta, tr, linkrev, fp1, fp2)

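    # rawcommit() hands commit() explicit parents, so the commit below runs
    # with use_dirstate False, and empty_ok=True keeps it from demanding an
    # editor session for an empty message.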
    def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
        if p1 is None:
            p1, p2 = self.dirstate.parents()
        return self.commit(files=files, text=text, user=user, date=date,
                           p1=p1, p2=p2, extra=extra, empty_ok=True)

    def commit(self, files=None, text="", user=None, date=None,
               match=util.always, force=False, force_editor=False,
               p1=None, p2=None, extra={}, empty_ok=False):
        wlock = lock = tr = None
        valid = 0 # don't save the dirstate if this isn't set
        if files:
            files = util.unique(files)
        try:
            commit = []
            remove = []
            changed = []
            use_dirstate = (p1 is None) # not rawcommit
            extra = extra.copy()

            if use_dirstate:
                if files:
                    for f in files:
                        s = self.dirstate[f]
                        if s in 'nma':
                            commit.append(f)
                        elif s == 'r':
                            remove.append(f)
                        else:
                            self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    changes = self.status(match=match)[:5]
                    modified, added, removed, deleted, unknown = changes
                    commit = modified + added
                    remove = removed
            else:
                commit = files

            if use_dirstate:
                p1, p2 = self.dirstate.parents()
                update_dirstate = True
            else:
                p1, p2 = p1, p2 or nullid
                update_dirstate = (self.dirstate.parents()[0] == p1)

            c1 = self.changelog.read(p1)
            c2 = self.changelog.read(p2)
            m1 = self.manifest.read(c1[0]).copy()
            m2 = self.manifest.read(c2[0])

            if use_dirstate:
                branchname = self.workingctx().branch()
                try:
                    branchname = branchname.decode('UTF-8').encode('UTF-8')
                except UnicodeDecodeError:
                    raise util.Abort(_('branch name not in UTF-8!'))
            else:
                branchname = ""

            if use_dirstate:
                oldname = c1[5].get("branch") # stored in UTF-8
                if (not commit and not remove and not force and p2 == nullid
                    and branchname == oldname):
                    self.ui.status(_("nothing changed\n"))
                    return None

            xp1 = hex(p1)
            if p2 == nullid: xp2 = ''
            else: xp2 = hex(p2)

            self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)

            wlock = self.wlock()
            lock = self.lock()
            tr = self.transaction()
            trp = weakref.proxy(tr)

            # check in files
            new = {}
            linkrev = self.changelog.count()
            commit.sort()
            is_exec = util.execfunc(self.root, m1.execf)
            is_link = util.linkfunc(self.root, m1.linkf)
            for f in commit:
                self.ui.note(f + "\n")
                try:
                    new[f] = self.filecommit(f, m1, m2, linkrev, trp, changed)
                    new_exec = is_exec(f)
                    new_link = is_link(f)
                    if ((not changed or changed[-1] != f) and
                        m2.get(f) != new[f]):
                        # mention the file in the changelog if some
                        # flag changed, even if there was no content
                        # change.
                        old_exec = m1.execf(f)
                        old_link = m1.linkf(f)
                        if old_exec != new_exec or old_link != new_link:
                            changed.append(f)
                    m1.set(f, new_exec, new_link)
                    if use_dirstate:
                        self.dirstate.normal(f)

                except (OSError, IOError):
                    if use_dirstate:
                        self.ui.warn(_("trouble committing %s!\n") % f)
                        raise
                    else:
                        remove.append(f)

            # update manifest
            m1.update(new)
            remove.sort()
            removed = []

            for f in remove:
                if f in m1:
                    del m1[f]
                    removed.append(f)
                elif f in m2:
                    removed.append(f)
            mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
                                   (new, removed))

            # add changeset
            new = new.keys()
            new.sort()

            user = user or self.ui.username()
            if (not empty_ok and not text) or force_editor:
                edittext = []
                if text:
                    edittext.append(text)
                    edittext.append("")
                edittext.append(_("HG: Enter commit message."
                                  " Lines beginning with 'HG:' are removed."))
                edittext.append("HG: --")
                edittext.append("HG: user: %s" % user)
                if p2 != nullid:
                    edittext.append("HG: branch merge")
                if branchname:
                    edittext.append("HG: branch '%s'" % util.tolocal(branchname))
                edittext.extend(["HG: changed %s" % f for f in changed])
                edittext.extend(["HG: removed %s" % f for f in removed])
                if not changed and not remove:
                    edittext.append("HG: no files changed")
                edittext.append("")
                # run editor in the repository root
                olddir = os.getcwd()
                os.chdir(self.root)
                text = self.ui.edit("\n".join(edittext), user)
                os.chdir(olddir)

            if branchname:
                extra["branch"] = branchname

-            if use_dirstate:
-                lines = [line.rstrip() for line in text.rstrip().splitlines()]
-                while lines and not lines[0]:
-                    del lines[0]
-                if not lines:
-                    raise util.Abort(_("empty commit message"))
-                text = '\n'.join(lines)
+            lines = [line.rstrip() for line in text.rstrip().splitlines()]
+            while lines and not lines[0]:
+                del lines[0]
+            if not lines and use_dirstate:
+                raise util.Abort(_("empty commit message"))
+            text = '\n'.join(lines)
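            # with this changeset the normalization above runs for rawcommit
            # callers too; only the empty-message abort is still restricted
            # to dirstate-based commits (use_dirstate)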
893
892
894 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
893 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
895 user, date, extra)
894 user, date, extra)
896 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
895 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
897 parent2=xp2)
896 parent2=xp2)
898 tr.close()
897 tr.close()
899
898
900 if self.branchcache:
899 if self.branchcache:
901 self.branchtags()
900 self.branchtags()
902
901
903 if use_dirstate or update_dirstate:
902 if use_dirstate or update_dirstate:
904 self.dirstate.setparents(n)
903 self.dirstate.setparents(n)
905 if use_dirstate:
904 if use_dirstate:
906 for f in removed:
905 for f in removed:
907 self.dirstate.forget(f)
906 self.dirstate.forget(f)
908 valid = 1 # our dirstate updates are complete
907 valid = 1 # our dirstate updates are complete
909
908
910 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
909 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
911 return n
910 return n
912 finally:
911 finally:
913 if not valid: # don't save our updated dirstate
912 if not valid: # don't save our updated dirstate
914 self.dirstate.invalidate()
913 self.dirstate.invalidate()
915 del tr, lock, wlock
914 del tr, lock, wlock
916
915
917 def walk(self, node=None, files=[], match=util.always, badmatch=None):
916 def walk(self, node=None, files=[], match=util.always, badmatch=None):
918 '''
917 '''
919 walk recursively through the directory tree or a given
918 walk recursively through the directory tree or a given
920 changeset, finding all files matched by the match
919 changeset, finding all files matched by the match
921 function
920 function
922
921
923 results are yielded in a tuple (src, filename), where src
922 results are yielded in a tuple (src, filename), where src
924 is one of:
923 is one of:
925 'f' the file was found in the directory tree
924 'f' the file was found in the directory tree
926 'm' the file was only in the dirstate and not in the tree
925 'm' the file was only in the dirstate and not in the tree
927 'b' file was not found and matched badmatch
926 'b' file was not found and matched badmatch
928 '''
927 '''
929
928
930 if node:
929 if node:
931 fdict = dict.fromkeys(files)
930 fdict = dict.fromkeys(files)
932 # for dirstate.walk, files=['.'] means "walk the whole tree".
931 # for dirstate.walk, files=['.'] means "walk the whole tree".
933 # follow that here, too
932 # follow that here, too
934 fdict.pop('.', None)
933 fdict.pop('.', None)
935 mdict = self.manifest.read(self.changelog.read(node)[0])
934 mdict = self.manifest.read(self.changelog.read(node)[0])
936 mfiles = mdict.keys()
935 mfiles = mdict.keys()
937 mfiles.sort()
936 mfiles.sort()
938 for fn in mfiles:
937 for fn in mfiles:
939 for ffn in fdict:
938 for ffn in fdict:
940 # match if the file is the exact name or a directory
939 # match if the file is the exact name or a directory
941 if ffn == fn or fn.startswith("%s/" % ffn):
940 if ffn == fn or fn.startswith("%s/" % ffn):
942 del fdict[ffn]
941 del fdict[ffn]
943 break
942 break
944 if match(fn):
943 if match(fn):
945 yield 'm', fn
944 yield 'm', fn
946 ffiles = fdict.keys()
945 ffiles = fdict.keys()
947 ffiles.sort()
946 ffiles.sort()
948 for fn in ffiles:
947 for fn in ffiles:
949 if badmatch and badmatch(fn):
948 if badmatch and badmatch(fn):
950 if match(fn):
949 if match(fn):
951 yield 'b', fn
950 yield 'b', fn
952 else:
951 else:
953 self.ui.warn(_('%s: No such file in rev %s\n')
952 self.ui.warn(_('%s: No such file in rev %s\n')
954 % (self.pathto(fn), short(node)))
953 % (self.pathto(fn), short(node)))
955 else:
954 else:
956 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
955 for src, fn in self.dirstate.walk(files, match, badmatch=badmatch):
957 yield src, fn
956 yield src, fn
958
957
    def status(self, node1=None, node2=None, files=[], match=util.always,
               list_ignored=False, list_clean=False, list_unknown=True):
        """return status of files between two nodes or node and working directory

        If node1 is None, use the first dirstate parent instead.
        If node2 is None, compare node1 with working directory.
        """

        def fcmp(fn, getnode):
            t1 = self.wread(fn)
            return self.file(fn).cmp(getnode(fn), t1)

        def mfmatches(node):
            change = self.changelog.read(node)
            mf = self.manifest.read(change[0]).copy()
            for fn in mf.keys():
                if not match(fn):
                    del mf[fn]
            return mf

        modified, added, removed, deleted, unknown = [], [], [], [], []
        ignored, clean = [], []

        compareworking = False
        if not node1 or (not node2 and node1 == self.dirstate.parents()[0]):
            compareworking = True

        if not compareworking:
            # read the manifest from node1 before the manifest from node2,
            # so that we'll hit the manifest cache if we're going through
            # all the revisions in parent->child order.
            mf1 = mfmatches(node1)

        # are we comparing the working directory?
        if not node2:
            (lookup, modified, added, removed, deleted, unknown,
             ignored, clean) = self.dirstate.status(files, match,
                                                    list_ignored, list_clean,
                                                    list_unknown)

            # are we comparing working dir against its parent?
            if compareworking:
                if lookup:
                    fixup = []
                    # do a full compare of any files that might have changed
                    ctx = self.changectx()
                    mexec = lambda f: 'x' in ctx.fileflags(f)
                    mlink = lambda f: 'l' in ctx.fileflags(f)
                    is_exec = util.execfunc(self.root, mexec)
                    is_link = util.linkfunc(self.root, mlink)
                    def flags(f):
                        return is_link(f) and 'l' or is_exec(f) and 'x' or ''
                    for f in lookup:
                        if (f not in ctx or flags(f) != ctx.fileflags(f)
                            or ctx[f].cmp(self.wread(f))):
                            modified.append(f)
                        else:
                            fixup.append(f)
                            if list_clean:
                                clean.append(f)

                    # update dirstate for files that are actually clean
                    if fixup:
                        wlock = None
                        try:
                            try:
                                wlock = self.wlock(False)
                            except lock.LockException:
                                pass
                            if wlock:
                                for f in fixup:
                                    self.dirstate.normal(f)
                        finally:
                            del wlock
            else:
                # we are comparing working dir against non-parent
                # generate a pseudo-manifest for the working dir
                # XXX: create it in dirstate.py ?
                mf2 = mfmatches(self.dirstate.parents()[0])
                is_exec = util.execfunc(self.root, mf2.execf)
                is_link = util.linkfunc(self.root, mf2.linkf)
                for f in lookup + modified + added:
                    mf2[f] = ""
                    mf2.set(f, is_exec(f), is_link(f))
                for f in removed:
                    if f in mf2:
                        del mf2[f]

        else:
            # we are comparing two revisions
            mf2 = mfmatches(node2)

        if not compareworking:
            # flush lists from dirstate before comparing manifests
            modified, added, clean = [], [], []

            # make sure to sort the files so we talk to the disk in a
            # reasonable order
            mf2keys = mf2.keys()
            mf2keys.sort()
            getnode = lambda fn: mf1.get(fn, nullid)
            for fn in mf2keys:
                if fn in mf1:
                    if (mf1.flags(fn) != mf2.flags(fn) or
                        (mf1[fn] != mf2[fn] and
                         (mf2[fn] != "" or fcmp(fn, getnode)))):
                        modified.append(fn)
                    elif list_clean:
                        clean.append(fn)
                    del mf1[fn]
                else:
                    added.append(fn)

            removed = mf1.keys()

        # sort and return results:
        for l in modified, added, removed, deleted, unknown, ignored, clean:
            l.sort()
        return (modified, added, removed, deleted, unknown, ignored, clean)

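    # Sketch of a typical status() call (hypothetical caller, not part of
    # the original module); the seven lists always come back in this order:
    #
    #   (modified, added, removed, deleted,
    #    unknown, ignored, clean) = repo.status(list_clean=True)
    #
    # ignored and clean stay empty unless list_ignored/list_clean are True.
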
    def add(self, list):
        wlock = self.wlock()
        try:
            rejected = []
            for f in list:
                p = self.wjoin(f)
                try:
                    st = os.lstat(p)
                except OSError:
                    self.ui.warn(_("%s does not exist!\n") % f)
                    rejected.append(f)
                    continue
                if st.st_size > 10000000:
                    self.ui.warn(_("%s: files over 10MB may cause memory and"
                                   " performance problems\n"
                                   "(use 'hg revert %s' to unadd the file)\n")
                                 % (f, f))
                if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
                    self.ui.warn(_("%s not added: only files and symlinks "
                                   "supported currently\n") % f)
                    rejected.append(f)
                elif self.dirstate[f] in 'amn':
                    self.ui.warn(_("%s already tracked!\n") % f)
                elif self.dirstate[f] == 'r':
                    self.dirstate.normallookup(f)
                else:
                    self.dirstate.add(f)
            return rejected
        finally:
            del wlock

    def forget(self, list):
        wlock = self.wlock()
        try:
            for f in list:
                if self.dirstate[f] != 'a':
                    self.ui.warn(_("%s not added!\n") % f)
                else:
                    self.dirstate.forget(f)
        finally:
            del wlock

    def remove(self, list, unlink=False):
        wlock = None
        try:
            if unlink:
                for f in list:
                    try:
                        util.unlink(self.wjoin(f))
                    except OSError, inst:
                        if inst.errno != errno.ENOENT:
                            raise
            wlock = self.wlock()
            for f in list:
                if unlink and os.path.exists(self.wjoin(f)):
                    self.ui.warn(_("%s still exists!\n") % f)
                elif self.dirstate[f] == 'a':
                    self.dirstate.forget(f)
                elif f not in self.dirstate:
                    self.ui.warn(_("%s not tracked!\n") % f)
                else:
                    self.dirstate.remove(f)
        finally:
            del wlock

    def undelete(self, list):
        wlock = None
        try:
            manifests = [self.manifest.read(self.changelog.read(p)[0])
                         for p in self.dirstate.parents() if p != nullid]
            wlock = self.wlock()
            for f in list:
                if self.dirstate[f] != 'r':
                    self.ui.warn(_("%s not removed!\n") % f)
                else:
                    m = f in manifests[0] and manifests[0] or manifests[1]
                    t = self.file(f).read(m[f])
                    self.wwrite(f, t, m.flags(f))
                    self.dirstate.normal(f)
        finally:
            del wlock

    def copy(self, source, dest):
        wlock = None
        try:
            p = self.wjoin(dest)
            if not (os.path.exists(p) or os.path.islink(p)):
                self.ui.warn(_("%s does not exist!\n") % dest)
            elif not (os.path.isfile(p) or os.path.islink(p)):
                self.ui.warn(_("copy failed: %s is not a file or a "
                               "symbolic link\n") % dest)
            else:
                wlock = self.wlock()
                if dest not in self.dirstate:
                    self.dirstate.add(dest)
                self.dirstate.copy(source, dest)
        finally:
            del wlock

    def heads(self, start=None):
        heads = self.changelog.heads(start)
        # sort the output in rev descending order
        heads = [(-self.changelog.rev(h), h) for h in heads]
        heads.sort()
        return [n for (r, n) in heads]

    def branchheads(self, branch, start=None):
        branches = self.branchtags()
        if branch not in branches:
            return []
        # The basic algorithm is this:
        #
        # Start from the branch tip since there are no later revisions that can
        # possibly be in this branch, and the tip is a guaranteed head.
        #
        # Remember the tip's parents as the first ancestors, since these by
        # definition are not heads.
        #
        # Step backwards from the branch tip through all the revisions. We are
        # guaranteed by the rules of Mercurial that we will now be visiting the
        # nodes in reverse topological order (children before parents).
        #
        # If a revision is one of the ancestors of a head then we can toss it
        # out of the ancestors set (we've already found it and won't be
        # visiting it again) and put its parents in the ancestors set.
        #
        # Otherwise, if a revision is in the branch it's another head, since it
        # wasn't in the ancestor list of an existing head. So add it to the
        # head list, and add its parents to the ancestor list.
        #
        # If it is not in the branch ignore it.
        #
        # Once we have a list of heads, use nodesbetween to filter out all the
        # heads that cannot be reached from startrev. There may be a more
        # efficient way to do this as part of the previous algorithm.

        set = util.set
        heads = [self.changelog.rev(branches[branch])]
        # Don't care if ancestors contains nullrev or not.
        ancestors = set(self.changelog.parentrevs(heads[0]))
        for rev in xrange(heads[0] - 1, nullrev, -1):
            if rev in ancestors:
                ancestors.update(self.changelog.parentrevs(rev))
                ancestors.remove(rev)
            elif self.changectx(rev).branch() == branch:
                heads.append(rev)
                ancestors.update(self.changelog.parentrevs(rev))
        heads = [self.changelog.node(rev) for rev in heads]
        if start is not None:
            heads = self.changelog.nodesbetween([start], heads)[2]
        return heads

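    # Worked example for the loop above (illustration only): given
    # 0 <- 1 <- 2 and 1 <- 3, with revs 2 and 3 both on branch B, the walk
    # starts at the branch tip 3 with ancestors = {1}.  Rev 2 is not in
    # that set but is on B, so it becomes a second head; revs 1 and 0 are
    # then consumed as ancestors, leaving heads [3, 2].
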
    def branches(self, nodes):
        if not nodes:
            nodes = [self.changelog.tip()]
        b = []
        for n in nodes:
            t = n
            while 1:
                p = self.changelog.parents(n)
                if p[1] != nullid or p[0] == nullid:
                    b.append((t, n, p[0], p[1]))
                    break
                n = p[0]
        return b

    def between(self, pairs):
        r = []

        for top, bottom in pairs:
            n, l, i = top, [], 0
            f = 1

            while n != bottom:
                p = self.changelog.parents(n)[0]
                if i == f:
                    l.append(n)
                    f = f * 2
                n = p
                i += 1

            r.append(l)

        return r

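    # between() samples each top->bottom chain at exponentially growing
    # distances; a rough standalone sketch of the indices it reports
    # (hypothetical helper, not part of the module):
    #
    #   def sample_positions(chain_length):
    #       i, f, out = 0, 1, []
    #       while i < chain_length:
    #           if i == f:
    #               out.append(i)
    #               f *= 2
    #           i += 1
    #       return out   # e.g. chain_length=10 -> [1, 2, 4, 8]
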
    def findincoming(self, remote, base=None, heads=None, force=False):
        """Return list of roots of the subsets of missing nodes from remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side and that no child of a node of base exists
        in both remote and self.
        Furthermore, base will be updated to include the nodes that exist
        in both self and remote but have no children that exist in both.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads.

        All the ancestors of base are in self and in remote.
        All the descendants of the list returned are missing in self.
        (and so we know that the rest of the nodes are missing in remote, see
        outgoing)
        """
        m = self.changelog.nodemap
        search = []
        fetch = {}
        seen = {}
        seenbranch = {}
        if base is None:
            base = {}

        if not heads:
            heads = remote.heads()

        if self.changelog.tip() == nullid:
            base[nullid] = 1
            if heads != [nullid]:
                return [nullid]
            return []

        # assume we're closer to the tip than the root
        # and start by examining the heads
        self.ui.status(_("searching for changes\n"))

        unknown = []
        for h in heads:
            if h not in m:
                unknown.append(h)
            else:
                base[h] = 1

        if not unknown:
            return []

        req = dict.fromkeys(unknown)
        reqcnt = 0

        # search through remote branches
        # a 'branch' here is a linear segment of history, with four parts:
        # head, root, first parent, second parent
        # (a branch always has two parents (or none) by definition)
        unknown = remote.branches(unknown)
        while unknown:
            r = []
            while unknown:
                n = unknown.pop(0)
                if n[0] in seen:
                    continue

                self.ui.debug(_("examining %s:%s\n")
                              % (short(n[0]), short(n[1])))
                if n[0] == nullid: # found the end of the branch
                    pass
                elif n in seenbranch:
                    self.ui.debug(_("branch already found\n"))
                    continue
                elif n[1] and n[1] in m: # do we know the base?
                    self.ui.debug(_("found incomplete branch %s:%s\n")
                                  % (short(n[0]), short(n[1])))
                    search.append(n) # schedule branch range for scanning
                    seenbranch[n] = 1
                else:
                    if n[1] not in seen and n[1] not in fetch:
                        if n[2] in m and n[3] in m:
                            self.ui.debug(_("found new changeset %s\n") %
                                          short(n[1]))
                            fetch[n[1]] = 1 # earliest unknown
                        for p in n[2:4]:
                            if p in m:
                                base[p] = 1 # latest known

                    for p in n[2:4]:
                        if p not in req and p not in m:
                            r.append(p)
                            req[p] = 1
                seen[n[0]] = 1

            if r:
                reqcnt += 1
                self.ui.debug(_("request %d: %s\n") %
                              (reqcnt, " ".join(map(short, r))))
                for p in xrange(0, len(r), 10):
                    for b in remote.branches(r[p:p+10]):
                        self.ui.debug(_("received %s:%s\n") %
                                      (short(b[0]), short(b[1])))
                        unknown.append(b)

        # do binary search on the branches we found
        while search:
            n = search.pop(0)
            reqcnt += 1
            l = remote.between([(n[0], n[1])])[0]
            l.append(n[1])
            p = n[0]
            f = 1
            for i in l:
                self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
                if i in m:
                    if f <= 2:
                        self.ui.debug(_("found new branch changeset %s\n") %
                                      short(p))
                        fetch[p] = 1
                        base[i] = 1
                    else:
                        self.ui.debug(_("narrowed branch search to %s:%s\n")
                                      % (short(p), short(i)))
                        search.append((p, i))
                    break
                p, f = i, f * 2

        # sanity check our fetch list
        for f in fetch.keys():
            if f in m:
                raise repo.RepoError(_("already have changeset ") + short(f[:4]))

        if base.keys() == [nullid]:
            if force:
                self.ui.warn(_("warning: repository is unrelated\n"))
            else:
                raise util.Abort(_("repository is unrelated"))

        self.ui.debug(_("found new changesets starting at ") +
                      " ".join([short(f) for f in fetch]) + "\n")

        self.ui.debug(_("%d total queries\n") % reqcnt)

        return fetch.keys()

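    # Hedged usage sketch for findincoming() (hypothetical caller): the
    # base dict is filled in as a side effect and can then be handed to
    # findoutgoing(), which is how prepush() below uses the pair:
    #
    #   base = {}
    #   roots = repo.findincoming(remote, base, remote.heads())
    #   update, updated_heads = repo.findoutgoing(remote, base,
    #                                             remote.heads())
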
    def findoutgoing(self, remote, base=None, heads=None, force=False):
        """Return list of nodes that are roots of subsets not in remote

        If base dict is specified, assume that these nodes and their parents
        exist on the remote side.
        If a list of heads is specified, return only nodes which are heads
        or ancestors of these heads, and return a second element which
        contains all remote heads which get new children.
        """
        if base is None:
            base = {}
            self.findincoming(remote, base, heads, force=force)

        self.ui.debug(_("common changesets up to ")
                      + " ".join(map(short, base.keys())) + "\n")

        remain = dict.fromkeys(self.changelog.nodemap)

        # prune everything remote has from the tree
        del remain[nullid]
        remove = base.keys()
        while remove:
            n = remove.pop(0)
            if n in remain:
                del remain[n]
                for p in self.changelog.parents(n):
                    remove.append(p)

        # find every node whose parents have been pruned
        subset = []
        # find every remote head that will get new children
        updated_heads = {}
        for n in remain:
            p1, p2 = self.changelog.parents(n)
            if p1 not in remain and p2 not in remain:
                subset.append(n)
            if heads:
                if p1 in heads:
                    updated_heads[p1] = True
                if p2 in heads:
                    updated_heads[p2] = True

        # this is the set of all roots we have to push
        if heads:
            return subset, updated_heads.keys()
        else:
            return subset

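    # Note the two return shapes of findoutgoing() (illustration, not
    # original text): with a heads list it returns a pair, without one it
    # returns just the list of roots:
    #
    #   subset = repo.findoutgoing(remote)                        # list
    #   subset, updated = repo.findoutgoing(remote, base, heads)  # pair
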
    def pull(self, remote, heads=None, force=False):
        lock = self.lock()
        try:
            fetch = self.findincoming(remote, heads=heads, force=force)
            if fetch == [nullid]:
                self.ui.status(_("requesting all changes\n"))

            if not fetch:
                self.ui.status(_("no changes found\n"))
                return 0

            if heads is None:
                cg = remote.changegroup(fetch, 'pull')
            else:
                if 'changegroupsubset' not in remote.capabilities:
                    raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
                cg = remote.changegroupsubset(fetch, heads, 'pull')
            return self.addchangegroup(cg, 'pull', remote.url())
        finally:
            del lock

    def push(self, remote, force=False, revs=None):
        # there are two ways to push to remote repo:
        #
        # addchangegroup assumes local user can lock remote
        # repo (local filesystem, old ssh servers).
        #
        # unbundle assumes local user cannot lock remote repo (new ssh
        # servers, http servers).

        if remote.capable('unbundle'):
            return self.push_unbundle(remote, force, revs)
        return self.push_addchangegroup(remote, force, revs)

    def prepush(self, remote, force, revs):
        base = {}
        remote_heads = remote.heads()
        inc = self.findincoming(remote, base, remote_heads, force=force)

        update, updated_heads = self.findoutgoing(remote, base, remote_heads)
        if revs is not None:
            msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
        else:
            bases, heads = update, self.changelog.heads()

        if not bases:
            self.ui.status(_("no changes found\n"))
            return None, 1
        elif not force:
            # check if we're creating new remote heads
            # to be a remote head after push, node must be either
            # - unknown locally
            # - a local outgoing head descended from update
            # - a remote head that's known locally and not
            #   ancestral to an outgoing head

            warn = 0

            if remote_heads == [nullid]:
                warn = 0
            elif not revs and len(heads) > len(remote_heads):
                warn = 1
            else:
                newheads = list(heads)
                for r in remote_heads:
                    if r in self.changelog.nodemap:
                        desc = self.changelog.heads(r, heads)
                        l = [h for h in heads if h in desc]
                        if not l:
                            newheads.append(r)
                    else:
                        newheads.append(r)
                if len(newheads) > len(remote_heads):
                    warn = 1

            if warn:
                self.ui.warn(_("abort: push creates new remote heads!\n"))
                self.ui.status(_("(did you forget to merge?"
                                 " use push -f to force)\n"))
                return None, 0
        elif inc:
            self.ui.warn(_("note: unsynced remote changes!\n"))

        if revs is None:
            cg = self.changegroup(update, 'push')
        else:
            cg = self.changegroupsubset(update, revs, 'push')
        return cg, remote_heads

    def push_addchangegroup(self, remote, force, revs):
        lock = remote.lock()
        try:
            ret = self.prepush(remote, force, revs)
            if ret[0] is not None:
                cg, remote_heads = ret
                return remote.addchangegroup(cg, 'push', self.url())
            return ret[1]
        finally:
            del lock

    def push_unbundle(self, remote, force, revs):
        # local repo finds heads on server, finds out what revs it
        # must push.  once revs transferred, if server finds it has
        # different heads (someone else won commit/push race), server
        # aborts.

        ret = self.prepush(remote, force, revs)
        if ret[0] is not None:
            cg, remote_heads = ret
            if force:
                remote_heads = ['force']
            return remote.unbundle(cg, remote_heads, 'push')
        return ret[1]

    def changegroupinfo(self, nodes, source):
        if self.ui.verbose or source == 'bundle':
            self.ui.status(_("%d changesets found\n") % len(nodes))
        if self.ui.debugflag:
            self.ui.debug(_("List of changesets:\n"))
            for node in nodes:
                self.ui.debug("%s\n" % hex(node))

    def changegroupsubset(self, bases, heads, source, extranodes=None):
        """This function generates a changegroup consisting of all the nodes
        that are descendants of any of the bases, and ancestors of any of
        the heads.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to.

        The caller can specify some nodes that must be included in the
        changegroup using the extranodes argument.  It should be a dict
        where the keys are the filenames (or 1 for the manifest), and the
        values are lists of (node, linknode) tuples, where node is a wanted
        node and linknode is the changelog node that should be transmitted as
        the linkrev.
        """

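        # Illustrative shape of the extranodes argument described above
        # (hypothetical node values, not from the original module):
        #
        #   extranodes = {
        #       1: [(manifestnode, linknode)],          # manifest entries
        #       'foo/bar.txt': [(filenode, linknode)],  # per-file entries
        #   }
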
        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        self.changegroupinfo(msng_cl_lst, source)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed to
            # know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history.  Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and the total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # for the inner function.
        def filenode_collector(changedfiles):
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming that a filenode belongs to the changenode
            # that the first manifest referencing it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    deltamf = mnfst.readdelta(mnfstnode)
                    # For each line in the delta
                    for f, fnode in deltamf.items():
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, let's
        # remove all those we know the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generating function that sets up a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Add the nodes that were explicitly requested.
        def add_extra_nodes(name, nodes):
            if not extranodes or name not in extranodes:
                return

            for node, linknode in extranodes[name]:
                if node not in nodes:
                    nodes[node] = linknode

1795 # Now that we have all theses utility functions to help out and
1794 # Now that we have all theses utility functions to help out and
1796 # logically divide up the task, generate the group.
1795 # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to look up the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            add_extra_nodes(1, msng_mnfst_set)
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            if extranodes:
                for fname in extranodes:
                    if isinstance(fname, int):
                        continue
                    add_extra_nodes(fname,
                                    msng_filenode_set.setdefault(fname, {}))
                    changedfiles[fname] = 1
            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if fname in msng_filenode_set:
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    # Sort the filenodes by their revision number.
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if fname in msng_filenode_set:
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield changegroup.closechunk()

            if msng_cl_lst:
                self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())

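    # (Note added for reference, not in the original: the stream produced by
    # gengroup() above is one delta group for the changelog, one for the
    # manifest, then for each changed file a filename chunk followed by that
    # file's delta group; a final empty chunk from changegroup.closechunk()
    # signals that no more file groups follow.)
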
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        nodes = cl.nodesbetween(basenodes, None)[0]
        revset = dict.fromkeys([cl.rev(n) for n in nodes])
        self.changegroupinfo(nodes, source)

        def identity(x):
            return x

        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            for fname in changedfiles:
                filerevlog = self.file(fname)
                if filerevlog.count() == 0:
                    raise util.Abort(_("empty or missing revlog for %s") % fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield changegroup.chunkheader(len(fname))
                    yield fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            yield changegroup.closechunk()

        if nodes:
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())

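    # Illustration (added, not part of the original file): a minimal sketch
    # of a consumer walking the framing that both gengroup() implementations
    # emit, using the same changegroup helpers that addchangegroup() relies
    # on below.  Each group ends with an empty chunk, so chunkiter stops at
    # a group boundary and getchunk returns '' at the end of the stream.
    #
    #     def count_file_groups(source):
    #         for chnk in changegroup.chunkiter(source):   # changelog group
    #             pass
    #         for chnk in changegroup.chunkiter(source):   # manifest group
    #             pass
    #         nfiles = 0
    #         while 1:
    #             fname = changegroup.getchunk(source)     # filename chunk
    #             if not fname:                            # empty chunk: done
    #                 break
    #             nfiles += 1
    #             for chnk in changegroup.chunkiter(source):
    #                 pass                                 # drain file group
    #         return nfiles
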
    def addchangegroup(self, source, srctype, url, emptyok=False):
        """add changegroup to repo.

        return values:
        - nothing changed or no source: 0
        - more heads than before: 1+added heads (2..n)
        - fewer heads than before: -1-removed heads (-2..-n)
        - number of heads stays the same: 1
        """
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return cl.count()

        def revmap(x):
            return cl.rev(x)

        if not source:
            return 0

        self.hook('prechangegroup', throw=True, source=srctype, url=url)

        changesets = files = revisions = 0

        # write changelog data to temp files so concurrent readers will not
        # see an inconsistent view
        cl = self.changelog
        cl.delayupdate()
        oldheads = len(cl.heads())

        tr = self.transaction()
        try:
            trp = weakref.proxy(tr)
            # pull off the changeset group
            self.ui.status(_("adding changesets\n"))
            cor = cl.count() - 1
            chunkiter = changegroup.chunkiter(source)
            if cl.addgroup(chunkiter, csmap, trp, 1) is None and not emptyok:
                raise util.Abort(_("received changelog group is empty"))
            cnr = cl.count() - 1
            changesets = cnr - cor

            # pull off the manifest group
            self.ui.status(_("adding manifests\n"))
            chunkiter = changegroup.chunkiter(source)
            # no need to check for empty manifest group here:
            # if the result of the merge of 1 and 2 is the same in 3 and 4,
            # no new manifest will be created and the manifest group will
            # be empty during the pull
            self.manifest.addgroup(chunkiter, revmap, trp)

            # process the files
            self.ui.status(_("adding file changes\n"))
            while 1:
                f = changegroup.getchunk(source)
                if not f:
                    break
                self.ui.debug(_("adding %s revisions\n") % f)
                fl = self.file(f)
                o = fl.count()
                chunkiter = changegroup.chunkiter(source)
                if fl.addgroup(chunkiter, revmap, trp) is None:
                    raise util.Abort(_("received file revlog group is empty"))
                revisions += fl.count() - o
                files += 1

            # make changelog see real files again
            cl.finalize(trp)

            newheads = len(self.changelog.heads())
            heads = ""
            if oldheads and newheads != oldheads:
                heads = _(" (%+d heads)") % (newheads - oldheads)

            self.ui.status(_("added %d changesets"
                             " with %d changes to %d files%s\n")
                           % (changesets, revisions, files, heads))

            if changesets > 0:
                self.hook('pretxnchangegroup', throw=True,
                          node=hex(self.changelog.node(cor+1)), source=srctype,
                          url=url)

            tr.close()
        finally:
            del tr

        if changesets > 0:
            # forcefully update the on-disk branch cache
            self.ui.debug(_("updating the branch cache\n"))
            self.branchtags()
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                      source=srctype, url=url)

            for i in xrange(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)),
                          source=srctype, url=url)

        # never return 0 here:
        if newheads < oldheads:
            return newheads - oldheads - 1
        else:
            return newheads - oldheads + 1

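    # (Notes added for illustration, not in the original: addchangegroup
    # hands the transaction to the revlogs as a weakref.proxy, so none of
    # them keeps it alive; the bare "del tr" in the finally block then drops
    # the last strong reference, and an uncommitted transaction aborts in
    # its destructor.  Hook ordering matters as well: 'pretxnchangegroup'
    # fires inside the open transaction, so a failing hook rolls the whole
    # pull back, while 'changegroup' and the per-changeset 'incoming' hooks
    # fire only after tr.close() has succeeded.)
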
    def stream_in(self, remote):
        fp = remote.stream_out()
        l = fp.readline()
        try:
            resp = int(l)
        except ValueError:
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        if resp == 1:
            raise util.Abort(_('operation forbidden by server'))
        elif resp == 2:
            raise util.Abort(_('locking the remote repository failed'))
        elif resp != 0:
            raise util.Abort(_('the server sent an unknown error code'))
        self.ui.status(_('streaming all changes\n'))
        l = fp.readline()
        try:
            total_files, total_bytes = map(int, l.split(' ', 1))
        except (ValueError, TypeError):
            raise util.UnexpectedOutput(
                _('Unexpected response from remote server:'), l)
        self.ui.status(_('%d files to transfer, %s of data\n') %
                       (total_files, util.bytecount(total_bytes)))
        start = time.time()
        for i in xrange(total_files):
            # XXX doesn't support '\n' or '\r' in filenames
            l = fp.readline()
            try:
                name, size = l.split('\0', 1)
                size = int(size)
            except (ValueError, TypeError):
                raise util.UnexpectedOutput(
                    _('Unexpected response from remote server:'), l)
            self.ui.debug('adding %s (%s)\n' % (name, util.bytecount(size)))
            ofp = self.sopener(name, 'w')
            for chunk in util.filechunkiter(fp, limit=size):
                ofp.write(chunk)
            ofp.close()
        elapsed = time.time() - start
        if elapsed <= 0:
            elapsed = 0.001
        self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
                       (util.bytecount(total_bytes), elapsed,
                        util.bytecount(total_bytes / elapsed)))
        self.invalidate()
        return len(self.heads()) + 1

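    # Illustration (added, not part of the original file): the stream_out
    # response as this method parses it off the wire -- an integer status
    # line ("0" on success), a "<file count> <byte count>" line, then for
    # each file "<name>\0<size>\n" followed by exactly <size> bytes of raw
    # store data.  Names and sizes below are hypothetical:
    #
    #     0
    #     2 49152
    #     data/foo.i<NUL>8192
    #     ... 8192 bytes of revlog data ...
    #     00changelog.i<NUL>40960
    #     ... 40960 bytes of revlog data ...
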
    def clone(self, remote, heads=[], stream=False):
        '''clone remote repository.

        keyword arguments:
        heads: list of revs to clone (forces use of pull)
        stream: use streaming clone if possible'''

        # now, all clients that can request uncompressed clones can
        # read repo formats supported by all servers that can serve
        # them.

        # if revlog format changes, client will have to check version
        # and format flags on "stream" capability, and use
        # uncompressed only if compatible.

        if stream and not heads and remote.capable('stream'):
            return self.stream_in(remote)
        return self.pull(remote, heads)

# used to avoid circular references so destructors work
def aftertrans(files):
    renamefiles = [tuple(t) for t in files]
    def a():
        for src, dest in renamefiles:
            util.rename(src, dest)
    return a
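
# (Note added for illustration: aftertrans returns a bare closure over
# copied (src, dest) tuples rather than an object that references the
# repository or transaction, so installing it as a transaction's post-close
# callback creates no reference cycle -- the transaction's destructor can
# still run promptly and perform the renames, e.g. journal -> undo.)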

def instance(ui, path, create):
    return localrepository(ui, util.drop_scheme('file', path), create)

def islocal(path):
    return True

@@ -1,90 +1,90 @@
 rm 'd/b'
 assuming destination git-repo-hg
 initializing destination git-repo-hg repository
 scanning source...
 sorting...
 converting...
 5 t1
 4 t2
 3 t3
 2 t4.1
 1 t4.2
 0 Merge branch other
-changeset: 5:c6d72c98aa00
+changeset: 5:4ab1af49a271
 tag: tip
-parent: 3:a18bdfccf429
-parent: 4:48cb5b72ce56
+parent: 3:0222ab0998d7
+parent: 4:5333c870e3c2
 user: test <test@example.org>
 date: Mon Jan 01 00:00:15 2007 +0000
 files: a
 description:
 Merge branch other
 
 committer: test <test@example.org>
 
 
 % full conversion
 o 9 "Discard change to foo" files: foo
 |\
 | o 8 "change foo" files: foo
 | |
 o | 7 "change bar" files: bar
 |/
 o 6 "(octopus merge fixup)" files:
 |\
 | o 5 "Octopus merge" files: baz
 | |\
 o | | 4 "add baz" files: baz
 | | |
 +---o 3 "add bar" files: bar
 | |
 o | 2 "add quux" files: quux
 | |
 | o 1 "change foo" files: foo
 |/
 o 0 "add foo" files: foo
 
 245a3b8bc653999c2b22cdabd517ccb47aecafdf 644 bar
 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
 88dfeab657e8cf2cef3dec67b914f49791ae76b1 644 quux
 % foo bar baz: octopus merge
 o 8 "Discard change to foo" files: foo
 |\
 | o 7 "change foo" files: foo
 | |
 o | 6 "change bar" files: bar
 |/
 o 5 "(octopus merge fixup)" files:
 |\
 | o 4 "Octopus merge" files: baz
 | |\
 o | | 3 "add baz" files: baz
 | | |
 +---o 2 "add bar" files: bar
 | |
 | o 1 "change foo" files: foo
 |/
 o 0 "add foo" files: foo
 
 245a3b8bc653999c2b22cdabd517ccb47aecafdf 644 bar
 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
 % foo baz quux: only some parents of an octopus merge; "discard" a head
 o 6 "Discard change to foo" files: foo
 |
 o 5 "change foo" files: foo
 |
 o 4 "Octopus merge" files:
 |\
 | o 3 "add baz" files: baz
 | |
 | o 2 "add quux" files: quux
 | |
 o | 1 "change foo" files: foo
 |/
 o 0 "add foo" files: foo
 
 354ae8da6e890359ef49ade27b68bbc361f3ca88 644 baz
 9277c9cc8dd4576fc01a17939b4351e5ada93466 644 foo
 88dfeab657e8cf2cef3dec67b914f49791ae76b1 644 quux