##// END OF EJS Templates
store all heads of a branch in the branch cache...
John Mulligan -
r7654:816b708f default
parent child Browse files
Show More
@@ -1,2151 +1,2131
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005-2007 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 from node import bin, hex, nullid, nullrev, short
8 from node import bin, hex, nullid, nullrev, short
9 from i18n import _
9 from i18n import _
10 import repo, changegroup
10 import repo, changegroup
11 import changelog, dirstate, filelog, manifest, context, weakref
11 import changelog, dirstate, filelog, manifest, context, weakref
12 import lock, transaction, stat, errno, ui, store
12 import lock, transaction, stat, errno, ui, store
13 import os, time, util, extensions, hook, inspect, error
13 import os, time, util, extensions, hook, inspect, error
14 import match as match_
14 import match as match_
15 import merge as merge_
15 import merge as merge_
16
16
17 class localrepository(repo.repository):
17 class localrepository(repo.repository):
18 capabilities = util.set(('lookup', 'changegroupsubset'))
18 capabilities = util.set(('lookup', 'changegroupsubset'))
19 supported = ('revlogv1', 'store', 'fncache')
19 supported = ('revlogv1', 'store', 'fncache')
20
20
    def __init__(self, parentui, path=None, create=0):
        """Open, or with create=1 initialize, the repository at path.

        parentui: ui object whose configuration this repo's ui inherits.
        Raises error.RepoError if the repository does not exist
        (create=0), already exists (create=1), or lists a requirement
        not in self.supported.
        """
        repo.repository.__init__(self)
        self.root = os.path.realpath(path)
        self.path = os.path.join(self.root, ".hg")
        self.origroot = path
        self.opener = util.opener(self.path)    # files under .hg
        self.wopener = util.opener(self.root)   # working-directory files

        if not os.path.isdir(self.path):
            if create:
                if not os.path.exists(path):
                    os.mkdir(path)
                os.mkdir(self.path)
                requirements = ["revlogv1"]
                if parentui.configbool('format', 'usestore', True):
                    os.mkdir(os.path.join(self.path, "store"))
                    requirements.append("store")
                    if parentui.configbool('format', 'usefncache', True):
                        requirements.append("fncache")
                # create an invalid changelog
                self.opener("00changelog.i", "a").write(
                    '\0\0\0\2' # represents revlogv2
                    ' dummy changelog to prevent using the old repo layout'
                )
                reqfile = self.opener("requires", "w")
                for r in requirements:
                    reqfile.write("%s\n" % r)
                reqfile.close()
            else:
                raise error.RepoError(_("repository %s not found") % path)
        elif create:
            raise error.RepoError(_("repository %s already exists") % path)
        else:
            # find requirements
            requirements = []
            try:
                requirements = self.opener("requires").read().splitlines()
                for r in requirements:
                    if r not in self.supported:
                        raise error.RepoError(_("requirement '%s' not supported") % r)
            except IOError, inst:
                # a missing requires file just means an older layout;
                # any other I/O failure is a real error
                if inst.errno != errno.ENOENT:
                    raise

        self.store = store.store(requirements, self.path, util.opener)
        self.spath = self.store.path
        self.sopener = self.store.opener   # files inside the store
        self.sjoin = self.store.join
        self.opener.createmode = self.store.createmode

        self.ui = ui.ui(parentui=parentui)
        try:
            self.ui.readconfig(self.join("hgrc"), self.root)
            extensions.loadall(self.ui)
        except IOError:
            # a repository without a per-repo hgrc is perfectly valid
            pass

        # lazily-populated caches; None/empty means "not computed yet"
        self.tagscache = None
        self._tagstypecache = None
        self.branchcache = None
        self._ubranchcache = None # UTF-8 version of branchcache
        self._branchcachetip = None
        self.nodetagscache = None
        self.filterpats = {}
        self._datafilters = {}
        self._transref = self._lockref = self._wlockref = None
87
87
    def __getattr__(self, name):
        # Lazily construct the expensive changelog/manifest/dirstate
        # objects on first access.  Each branch assigns the result to an
        # instance attribute, so later accesses bypass __getattr__.
        if name == 'changelog':
            self.changelog = changelog.changelog(self.sopener)
            self.sopener.defversion = self.changelog.version
            return self.changelog
        if name == 'manifest':
            # touch the changelog first so sopener.defversion is set
            self.changelog
            self.manifest = manifest.manifest(self.sopener)
            return self.manifest
        if name == 'dirstate':
            self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
            return self.dirstate
        else:
            raise AttributeError(name)
102
102
103 def __getitem__(self, changeid):
103 def __getitem__(self, changeid):
104 if changeid == None:
104 if changeid == None:
105 return context.workingctx(self)
105 return context.workingctx(self)
106 return context.changectx(self, changeid)
106 return context.changectx(self, changeid)
107
107
    def __nonzero__(self):
        # a repository object is always truthy, even when it has no revisions
        return True
110
110
    def __len__(self):
        # number of revisions == length of the changelog
        return len(self.changelog)
113
113
    def __iter__(self):
        # iterate over all revision numbers, oldest first
        for i in xrange(len(self)):
            yield i
117
117
118 def url(self):
118 def url(self):
119 return 'file:' + self.root
119 return 'file:' + self.root
120
120
    def hook(self, name, throw=False, **args):
        # delegate to the hook machinery; throw=True raises on hook failure
        return hook.hook(self.ui, self, name, throw, **args)
123
123
    # characters that may never appear in a tag name
    tag_disallowed = ':\r\n'
125
125
    def _tag(self, names, node, message, local, user, date, parent=None,
             extra={}):
        """Record one or more tags for node (worker behind tag()).

        names may be a single string or a sequence of tag names.
        local=True writes to the uncommitted 'localtags' file; otherwise
        .hgtags is rewritten and committed.  parent, when given, selects
        the commit parent instead of the dirstate.  Returns the new tag
        changeset node, or None for local tags.
        NOTE(review): extra={} is a mutable default argument; it appears
        to be passed through to commit() unmodified, but confirm no
        callee mutates it.
        """
        use_dirstate = parent is None

        if isinstance(names, str):
            allchars = names
            names = (names,)
        else:
            allchars = ''.join(names)
        for c in self.tag_disallowed:
            if c in allchars:
                raise util.Abort(_('%r cannot be used in a tag name') % c)

        # give pretag hooks a chance to veto before anything is written
        for name in names:
            self.hook('pretag', throw=True, node=hex(node), tag=name,
                      local=local)

        def writetags(fp, names, munge, prevtags):
            # append tag lines at EOF; each line is "<hex node> <name>"
            fp.seek(0, 2)
            if prevtags and prevtags[-1] != '\n':
                fp.write('\n')
            for name in names:
                m = munge and munge(name) or name
                if self._tagstypecache and name in self._tagstypecache:
                    # re-record the old value so readers see the history
                    old = self.tagscache.get(name, nullid)
                    fp.write('%s %s\n' % (hex(old), m))
                fp.write('%s %s\n' % (hex(node), m))
            fp.close()

        prevtags = ''
        if local:
            try:
                fp = self.opener('localtags', 'r+')
            except IOError, err:
                fp = self.opener('localtags', 'a')
            else:
                prevtags = fp.read()

            # local tags are stored in the current charset
            writetags(fp, names, None, prevtags)
            for name in names:
                self.hook('tag', node=hex(node), tag=name, local=local)
            return

        if use_dirstate:
            try:
                fp = self.wfile('.hgtags', 'rb+')
            except IOError, err:
                fp = self.wfile('.hgtags', 'ab')
            else:
                prevtags = fp.read()
        else:
            # committing against an explicit parent: start from that
            # parent's .hgtags content, if any
            try:
                prevtags = self.filectx('.hgtags', parent).data()
            except error.LookupError:
                pass
            fp = self.wfile('.hgtags', 'wb')
            if prevtags:
                fp.write(prevtags)

        # committed tags are stored in UTF-8
        writetags(fp, names, util.fromlocal, prevtags)

        if use_dirstate and '.hgtags' not in self.dirstate:
            self.add(['.hgtags'])

        tagnode = self.commit(['.hgtags'], message, user, date, p1=parent,
                              extra=extra)

        for name in names:
            self.hook('tag', node=hex(node), tag=name, local=local)

        return tagnode
199
199
    def tag(self, names, node, message, local, user, date):
        '''tag a revision with one or more symbolic names.

        names is a list of strings or, when adding a single tag, names may be a
        string.

        if local is True, the tags are stored in a per-repository file.
        otherwise, they are stored in the .hgtags file, and a new
        changeset is committed with the change.

        keyword arguments:

        local: whether to store tags in non-version-controlled file
        (default False)

        message: commit message to use if committing

        user: name of user to use if committing

        date: date tuple to use if committing'''

        # refuse to run if .hgtags has uncommitted changes in any of the
        # first five status categories (modified/added/removed/deleted/unknown)
        for x in self.status()[:5]:
            if '.hgtags' in x:
                raise util.Abort(_('working copy of .hgtags is changed '
                                   '(please commit .hgtags manually)'))

        self._tag(names, node, message, local, user, date)
227
227
    def tags(self):
        '''return a mapping of tag to node'''
        if self.tagscache:
            return self.tagscache

        globaltags = {}
        tagtypes = {}

        def readtags(lines, fn, tagtype):
            # parse one tags file and merge its entries into globaltags;
            # fn is used only for warning messages
            filetags = {}
            count = 0

            def warn(msg):
                self.ui.warn(_("%s, line %s: %s\n") % (fn, count, msg))

            for l in lines:
                count += 1
                if not l:
                    continue
                s = l.split(" ", 1)
                if len(s) != 2:
                    warn(_("cannot parse entry"))
                    continue
                node, key = s
                key = util.tolocal(key.strip()) # stored in UTF-8
                try:
                    bin_n = bin(node)
                except TypeError:
                    warn(_("node '%s' is not well formed") % node)
                    continue
                if bin_n not in self.changelog.nodemap:
                    warn(_("tag '%s' refers to unknown node") % key)
                    continue

                # h accumulates the tag's superseded (historical) nodes
                h = []
                if key in filetags:
                    n, h = filetags[key]
                    h.append(n)
                filetags[key] = (bin_n, h)

            for k, nh in filetags.iteritems():
                if k not in globaltags:
                    globaltags[k] = nh
                    tagtypes[k] = tagtype
                    continue

                # we prefer the global tag if:
                #  it supersedes us OR
                #  mutual supersedes and it has a higher rank
                # otherwise we win because we're tip-most
                an, ah = nh
                bn, bh = globaltags[k]
                if (bn != an and an in bh and
                    (bn not in ah or len(bh) > len(ah))):
                    an = bn
                ah.extend([n for n in bh if n not in ah])
                globaltags[k] = an, ah
                tagtypes[k] = tagtype

        # read the tags file from each head, ending with the tip
        f = None
        for rev, node, fnode in self._hgtagsnodes():
            f = (f and f.filectx(fnode) or
                 self.filectx('.hgtags', fileid=fnode))
            readtags(f.data().splitlines(), f, "global")

        try:
            data = util.fromlocal(self.opener("localtags").read())
            # localtags are stored in the local character set
            # while the internal tag table is stored in UTF-8
            readtags(data.splitlines(), "localtags", "local")
        except IOError:
            pass

        self.tagscache = {}
        self._tagstypecache = {}
        for k, nh in globaltags.iteritems():
            n = nh[0]
            # a tag mapped to nullid has been deleted
            if n != nullid:
                self.tagscache[k] = n
            self._tagstypecache[k] = tagtypes[k]
        self.tagscache['tip'] = self.changelog.tip()
        return self.tagscache
311
311
    def tagtype(self, tagname):
        '''
        return the type of the given tag. result can be:

        'local'  : a local tag
        'global' : a global tag
        None     : tag does not exist
        '''

        # populate _tagstypecache as a side effect
        self.tags()

        return self._tagstypecache.get(tagname)
324
324
    def _hgtagsnodes(self):
        """Return (rev, node, fnode) for the .hgtags file on each head.

        Heads are visited oldest-first so that the tip is read last;
        heads sharing the same .hgtags filenode keep only the newest
        occurrence.
        """
        heads = self.heads()
        heads.reverse()
        last = {}   # .hgtags filenode -> index of its newest entry in ret
        ret = []
        for node in heads:
            c = self[node]
            rev = c.rev()
            try:
                fnode = c.filenode('.hgtags')
            except error.LookupError:
                # this head has no .hgtags file
                continue
            ret.append((rev, node, fnode))
            if fnode in last:
                # drop the older entry with the same file content
                ret[last[fnode]] = None
            last[fnode] = len(ret) - 1
        return [item for item in ret if item]
342
342
    def tagslist(self):
        '''return a list of tags ordered by revision'''
        l = []
        for t, n in self.tags().iteritems():
            try:
                r = self.changelog.rev(n)
            except:
                # deliberately broad: any failure to resolve the node
                # sorts the tag to the front rather than aborting
                r = -2 # sort to the beginning of the list if unknown
            l.append((r, t, n))
        return [(t, n) for r, t, n in util.sort(l)]
353
353
354 def nodetags(self, node):
354 def nodetags(self, node):
355 '''return the tags associated with a node'''
355 '''return the tags associated with a node'''
356 if not self.nodetagscache:
356 if not self.nodetagscache:
357 self.nodetagscache = {}
357 self.nodetagscache = {}
358 for t, n in self.tags().iteritems():
358 for t, n in self.tags().iteritems():
359 self.nodetagscache.setdefault(n, []).append(t)
359 self.nodetagscache.setdefault(n, []).append(t)
360 return self.nodetagscache.get(node, [])
360 return self.nodetagscache.get(node, [])
361
361
    def _branchtags(self, partial, lrev):
        # TODO: rename this function?
        # Bring the partial branch-head cache up to date from lrev+1
        # through tip, and persist it if anything changed.
        tiprev = len(self) - 1
        if lrev != tiprev:
            self._updatebranchcache(partial, lrev+1, tiprev+1)
            self._writebranchcache(partial, self.changelog.tip(), tiprev)

        return partial
369
370
    def _branchheads(self):
        """Return a dict mapping branch name (local charset) to the
        list of all head nodes on that branch, rebuilding the cache
        incrementally when the tip has moved."""
        tip = self.changelog.tip()
        if self.branchcache is not None and self._branchcachetip == tip:
            return self.branchcache

        oldtip = self._branchcachetip
        self._branchcachetip = tip
        if self.branchcache is None:
            self.branchcache = {} # avoid recursion in changectx
        else:
            self.branchcache.clear() # keep using the same dict
        if oldtip is None or oldtip not in self.changelog.nodemap:
            # no usable in-memory state (e.g. after a strip): reload
            # from the on-disk cache file
            partial, last, lrev = self._readbranchcache()
        else:
            lrev = self.changelog.rev(oldtip)
            partial = self._ubranchcache

        self._branchtags(partial, lrev)
        # this private cache holds all heads (not just tips)
        self._ubranchcache = partial

        # the branch cache is stored on disk as UTF-8, but in the local
        # charset internally
        for k, v in partial.iteritems():
            self.branchcache[util.tolocal(k)] = v
        return self.branchcache
395
397
398
399 def branchtags(self):
400 '''return a dict where branch names map to the tipmost head of
401 the branch'''
402 return dict([(k, v[-1]) for (k, v) in self._branchheads().iteritems()])
403
    def _readbranchcache(self):
        """Load the on-disk branch-head cache.

        Returns (partial, last, lrev) where partial maps branch name to
        a list of head nodes and (last, lrev) identify the tip the cache
        was valid for.  On any problem the cache is treated as empty.
        """
        partial = {}
        try:
            f = self.opener("branchheads.cache")
            lines = f.read().split('\n')
            f.close()
        except (IOError, OSError):
            return {}, nullid, nullrev

        try:
            # first line: "<tip hex> <tip rev>"
            last, lrev = lines.pop(0).split(" ", 1)
            last, lrev = bin(last), int(lrev)
            if lrev >= len(self) or self[lrev].node() != last:
                # invalidate the cache
                raise ValueError('invalidating branch cache (tip differs)')
            # remaining lines: "<head hex> <branch name>"
            for l in lines:
                if not l: continue
                node, label = l.split(" ", 1)
                partial.setdefault(label.strip(), []).append(bin(node))
        except KeyboardInterrupt:
            raise
        except Exception, inst:
            # a corrupt cache is never fatal; fall back to empty
            if self.ui.debugflag:
                self.ui.warn(str(inst), '\n')
            partial, last, lrev = {}, nullid, nullrev
        return partial, last, lrev
422
430
    def _writebranchcache(self, branches, tip, tiprev):
        """Atomically persist the branch-head cache; best effort only.

        branches maps branch name to a list of head nodes; a write
        failure is silently ignored (the cache is just an optimization).
        """
        try:
            f = self.opener("branchheads.cache", "w", atomictemp=True)
            f.write("%s %s\n" % (hex(tip), tiprev))
            for label, nodes in branches.iteritems():
                for node in nodes:
                    f.write("%s %s\n" % (hex(node), label))
            f.rename()
        except (IOError, OSError):
            pass
432
441
    def _updatebranchcache(self, partial, start, end):
        """Fold revisions [start, end) into the branch-head map partial.

        Each new revision becomes a head of its branch; its parents,
        being ancestors, stop being heads of that branch.
        """
        for r in xrange(start, end):
            c = self[r]
            b = c.branch()
            bheads = partial.setdefault(b, [])
            bheads.append(c.node())
            for p in c.parents():
                # a parent of a new head can no longer be a head itself
                pn = p.node()
                if pn in bheads:
                    bheads.remove(pn)
438
452
    def lookup(self, key):
        """Resolve key to a changelog node.

        Resolution order: integer rev, '.', 'null', 'tip', exact node,
        tag name, branch name, unambiguous node prefix.  Raises
        error.RepoError when nothing matches.
        """
        if isinstance(key, int):
            return self.changelog.node(key)
        elif key == '.':
            return self.dirstate.parents()[0]
        elif key == 'null':
            return nullid
        elif key == 'tip':
            return self.changelog.tip()
        n = self.changelog._match(key)
        if n:
            return n
        if key in self.tags():
            return self.tags()[key]
        if key in self.branchtags():
            return self.branchtags()[key]
        n = self.changelog._partialmatch(key)
        if n:
            return n
        try:
            # show a binary node in readable form in the error message
            if len(key) == 20:
                key = hex(key)
        except:
            pass
        raise error.RepoError(_("unknown revision '%s'") % key)
464
478
    def local(self):
        # this repository class is always local (vs. http/ssh repos)
        return True
467
481
    def join(self, f):
        # path of f inside the .hg directory
        return os.path.join(self.path, f)
470
484
    def wjoin(self, f):
        # path of f inside the working directory
        return os.path.join(self.root, f)
473
487
    def rjoin(self, f):
        # like wjoin, but f uses repository ('/') separators
        return os.path.join(self.root, util.pconvert(f))
476
490
    def file(self, f):
        """Return the filelog for tracked file f (leading '/' stripped)."""
        if f[0] == '/':
            f = f[1:]
        return filelog.filelog(self.sopener, f)
481
495
    def changectx(self, changeid):
        # legacy spelling of repo[changeid]
        return self[changeid]
484
498
    def parents(self, changeid=None):
        '''get list of changectxs for parents of changeid'''
        return self[changeid].parents()
488
502
    def filectx(self, path, changeid=None, fileid=None):
        """changeid can be a changeset revision, node, or tag.
           fileid can be a file revision or node."""
        return context.filectx(self, path, changeid, fileid)
493
507
    def getcwd(self):
        # current directory relative to the repository root
        return self.dirstate.getcwd()
496
510
    def pathto(self, f, cwd=None):
        # repo-relative path f rendered relative to cwd
        return self.dirstate.pathto(f, cwd)
499
513
    def wfile(self, f, mode='r'):
        # open file f from the working directory
        return self.wopener(f, mode)
502
516
    def _link(self, f):
        # is working-directory file f a symlink?
        return os.path.islink(self.wjoin(f))
505
519
    def _filter(self, filter, filename, data):
        """Run data through the configured encode/decode filters.

        filter is the config section name ('encode' or 'decode');
        patterns and commands are read from the ui config on first use
        and cached in self.filterpats.  Returns the (possibly
        transformed) data; only the first matching pattern applies.
        """
        if filter not in self.filterpats:
            l = []
            for pat, cmd in self.ui.configitems(filter):
                if cmd == '!':
                    # '!' disables any earlier filter for this pattern
                    continue
                mf = util.matcher(self.root, "", [pat], [], [])[1]
                fn = None
                params = cmd
                # registered in-process filters take precedence over
                # shelling out via util.filter
                for name, filterfn in self._datafilters.iteritems():
                    if cmd.startswith(name):
                        fn = filterfn
                        params = cmd[len(name):].lstrip()
                        break
                if not fn:
                    fn = lambda s, c, **kwargs: util.filter(s, c)
                # Wrap old filters not supporting keyword arguments
                if not inspect.getargspec(fn)[2]:
                    oldfn = fn
                    fn = lambda s, c, **kwargs: oldfn(s, c)
                l.append((mf, fn, params))
            self.filterpats[filter] = l

        for mf, fn, cmd in self.filterpats[filter]:
            if mf(filename):
                self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
                data = fn(data, cmd, ui=self.ui, repo=self, filename=filename)
                break

        return data
536
550
    def adddatafilter(self, name, filter):
        # register an in-process data filter usable from encode/decode config
        self._datafilters[name] = filter
539
553
    def wread(self, filename):
        """Read filename from the working dir, applying encode filters.

        For a symlink the link target itself is returned as the data.
        """
        if self._link(filename):
            data = os.readlink(self.wjoin(filename))
        else:
            data = self.wopener(filename, 'r').read()
        return self._filter("encode", filename, data)
546
560
    def wwrite(self, filename, data, flags):
        """Write data to filename in the working dir, applying decode
        filters; flags may contain 'l' (symlink) and 'x' (executable).
        """
        data = self._filter("decode", filename, data)
        # remove any existing file so symlink/regular transitions work
        try:
            os.unlink(self.wjoin(filename))
        except OSError:
            pass
        if 'l' in flags:
            self.wopener.symlink(data, filename)
        else:
            self.wopener(filename, 'w').write(data)
            if 'x' in flags:
                util.set_flags(self.wjoin(filename), False, True)
559
573
560 def wwritedata(self, filename, data):
574 def wwritedata(self, filename, data):
561 return self._filter("decode", filename, data)
575 return self._filter("decode", filename, data)
562
576
563 def transaction(self):
577 def transaction(self):
564 if self._transref and self._transref():
578 if self._transref and self._transref():
565 return self._transref().nest()
579 return self._transref().nest()
566
580
567 # abort here if the journal already exists
581 # abort here if the journal already exists
568 if os.path.exists(self.sjoin("journal")):
582 if os.path.exists(self.sjoin("journal")):
569 raise error.RepoError(_("journal already exists - run hg recover"))
583 raise error.RepoError(_("journal already exists - run hg recover"))
570
584
571 # save dirstate for rollback
585 # save dirstate for rollback
572 try:
586 try:
573 ds = self.opener("dirstate").read()
587 ds = self.opener("dirstate").read()
574 except IOError:
588 except IOError:
575 ds = ""
589 ds = ""
576 self.opener("journal.dirstate", "w").write(ds)
590 self.opener("journal.dirstate", "w").write(ds)
577 self.opener("journal.branch", "w").write(self.dirstate.branch())
591 self.opener("journal.branch", "w").write(self.dirstate.branch())
578
592
579 renames = [(self.sjoin("journal"), self.sjoin("undo")),
593 renames = [(self.sjoin("journal"), self.sjoin("undo")),
580 (self.join("journal.dirstate"), self.join("undo.dirstate")),
594 (self.join("journal.dirstate"), self.join("undo.dirstate")),
581 (self.join("journal.branch"), self.join("undo.branch"))]
595 (self.join("journal.branch"), self.join("undo.branch"))]
582 tr = transaction.transaction(self.ui.warn, self.sopener,
596 tr = transaction.transaction(self.ui.warn, self.sopener,
583 self.sjoin("journal"),
597 self.sjoin("journal"),
584 aftertrans(renames),
598 aftertrans(renames),
585 self.store.createmode)
599 self.store.createmode)
586 self._transref = weakref.ref(tr)
600 self._transref = weakref.ref(tr)
587 return tr
601 return tr
588
602
589 def recover(self):
603 def recover(self):
590 l = self.lock()
604 l = self.lock()
591 try:
605 try:
592 if os.path.exists(self.sjoin("journal")):
606 if os.path.exists(self.sjoin("journal")):
593 self.ui.status(_("rolling back interrupted transaction\n"))
607 self.ui.status(_("rolling back interrupted transaction\n"))
594 transaction.rollback(self.sopener, self.sjoin("journal"))
608 transaction.rollback(self.sopener, self.sjoin("journal"))
595 self.invalidate()
609 self.invalidate()
596 return True
610 return True
597 else:
611 else:
598 self.ui.warn(_("no interrupted transaction available\n"))
612 self.ui.warn(_("no interrupted transaction available\n"))
599 return False
613 return False
600 finally:
614 finally:
601 del l
615 del l
602
616
603 def rollback(self):
617 def rollback(self):
604 wlock = lock = None
618 wlock = lock = None
605 try:
619 try:
606 wlock = self.wlock()
620 wlock = self.wlock()
607 lock = self.lock()
621 lock = self.lock()
608 if os.path.exists(self.sjoin("undo")):
622 if os.path.exists(self.sjoin("undo")):
609 self.ui.status(_("rolling back last transaction\n"))
623 self.ui.status(_("rolling back last transaction\n"))
610 transaction.rollback(self.sopener, self.sjoin("undo"))
624 transaction.rollback(self.sopener, self.sjoin("undo"))
611 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
625 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
612 try:
626 try:
613 branch = self.opener("undo.branch").read()
627 branch = self.opener("undo.branch").read()
614 self.dirstate.setbranch(branch)
628 self.dirstate.setbranch(branch)
615 except IOError:
629 except IOError:
616 self.ui.warn(_("Named branch could not be reset, "
630 self.ui.warn(_("Named branch could not be reset, "
617 "current branch still is: %s\n")
631 "current branch still is: %s\n")
618 % util.tolocal(self.dirstate.branch()))
632 % util.tolocal(self.dirstate.branch()))
619 self.invalidate()
633 self.invalidate()
620 self.dirstate.invalidate()
634 self.dirstate.invalidate()
621 else:
635 else:
622 self.ui.warn(_("no rollback information available\n"))
636 self.ui.warn(_("no rollback information available\n"))
623 finally:
637 finally:
624 del lock, wlock
638 del lock, wlock
625
639
626 def invalidate(self):
640 def invalidate(self):
627 for a in "changelog manifest".split():
641 for a in "changelog manifest".split():
628 if a in self.__dict__:
642 if a in self.__dict__:
629 delattr(self, a)
643 delattr(self, a)
630 self.tagscache = None
644 self.tagscache = None
631 self._tagstypecache = None
645 self._tagstypecache = None
632 self.nodetagscache = None
646 self.nodetagscache = None
633 self.branchcache = None
647 self.branchcache = None
634 self._ubranchcache = None
648 self._ubranchcache = None
635 self._branchcachetip = None
649 self._branchcachetip = None
636
650
637 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
651 def _lock(self, lockname, wait, releasefn, acquirefn, desc):
638 try:
652 try:
639 l = lock.lock(lockname, 0, releasefn, desc=desc)
653 l = lock.lock(lockname, 0, releasefn, desc=desc)
640 except error.LockHeld, inst:
654 except error.LockHeld, inst:
641 if not wait:
655 if not wait:
642 raise
656 raise
643 self.ui.warn(_("waiting for lock on %s held by %r\n") %
657 self.ui.warn(_("waiting for lock on %s held by %r\n") %
644 (desc, inst.locker))
658 (desc, inst.locker))
645 # default to 600 seconds timeout
659 # default to 600 seconds timeout
646 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
660 l = lock.lock(lockname, int(self.ui.config("ui", "timeout", "600")),
647 releasefn, desc=desc)
661 releasefn, desc=desc)
648 if acquirefn:
662 if acquirefn:
649 acquirefn()
663 acquirefn()
650 return l
664 return l
651
665
652 def lock(self, wait=True):
666 def lock(self, wait=True):
653 if self._lockref and self._lockref():
667 if self._lockref and self._lockref():
654 return self._lockref()
668 return self._lockref()
655
669
656 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
670 l = self._lock(self.sjoin("lock"), wait, None, self.invalidate,
657 _('repository %s') % self.origroot)
671 _('repository %s') % self.origroot)
658 self._lockref = weakref.ref(l)
672 self._lockref = weakref.ref(l)
659 return l
673 return l
660
674
661 def wlock(self, wait=True):
675 def wlock(self, wait=True):
662 if self._wlockref and self._wlockref():
676 if self._wlockref and self._wlockref():
663 return self._wlockref()
677 return self._wlockref()
664
678
665 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
679 l = self._lock(self.join("wlock"), wait, self.dirstate.write,
666 self.dirstate.invalidate, _('working directory of %s') %
680 self.dirstate.invalidate, _('working directory of %s') %
667 self.origroot)
681 self.origroot)
668 self._wlockref = weakref.ref(l)
682 self._wlockref = weakref.ref(l)
669 return l
683 return l
670
684
671 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
685 def filecommit(self, fctx, manifest1, manifest2, linkrev, tr, changelist):
672 """
686 """
673 commit an individual file as part of a larger transaction
687 commit an individual file as part of a larger transaction
674 """
688 """
675
689
676 fn = fctx.path()
690 fn = fctx.path()
677 t = fctx.data()
691 t = fctx.data()
678 fl = self.file(fn)
692 fl = self.file(fn)
679 fp1 = manifest1.get(fn, nullid)
693 fp1 = manifest1.get(fn, nullid)
680 fp2 = manifest2.get(fn, nullid)
694 fp2 = manifest2.get(fn, nullid)
681
695
682 meta = {}
696 meta = {}
683 cp = fctx.renamed()
697 cp = fctx.renamed()
684 if cp and cp[0] != fn:
698 if cp and cp[0] != fn:
685 # Mark the new revision of this file as a copy of another
699 # Mark the new revision of this file as a copy of another
686 # file. This copy data will effectively act as a parent
700 # file. This copy data will effectively act as a parent
687 # of this new revision. If this is a merge, the first
701 # of this new revision. If this is a merge, the first
688 # parent will be the nullid (meaning "look up the copy data")
702 # parent will be the nullid (meaning "look up the copy data")
689 # and the second one will be the other parent. For example:
703 # and the second one will be the other parent. For example:
690 #
704 #
691 # 0 --- 1 --- 3 rev1 changes file foo
705 # 0 --- 1 --- 3 rev1 changes file foo
692 # \ / rev2 renames foo to bar and changes it
706 # \ / rev2 renames foo to bar and changes it
693 # \- 2 -/ rev3 should have bar with all changes and
707 # \- 2 -/ rev3 should have bar with all changes and
694 # should record that bar descends from
708 # should record that bar descends from
695 # bar in rev2 and foo in rev1
709 # bar in rev2 and foo in rev1
696 #
710 #
697 # this allows this merge to succeed:
711 # this allows this merge to succeed:
698 #
712 #
699 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
713 # 0 --- 1 --- 3 rev4 reverts the content change from rev2
700 # \ / merging rev3 and rev4 should use bar@rev2
714 # \ / merging rev3 and rev4 should use bar@rev2
701 # \- 2 --- 4 as the merge base
715 # \- 2 --- 4 as the merge base
702 #
716 #
703
717
704 cf = cp[0]
718 cf = cp[0]
705 cr = manifest1.get(cf)
719 cr = manifest1.get(cf)
706 nfp = fp2
720 nfp = fp2
707
721
708 if manifest2: # branch merge
722 if manifest2: # branch merge
709 if fp2 == nullid: # copied on remote side
723 if fp2 == nullid: # copied on remote side
710 if fp1 != nullid or cf in manifest2:
724 if fp1 != nullid or cf in manifest2:
711 cr = manifest2[cf]
725 cr = manifest2[cf]
712 nfp = fp1
726 nfp = fp1
713
727
714 # find source in nearest ancestor if we've lost track
728 # find source in nearest ancestor if we've lost track
715 if not cr:
729 if not cr:
716 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
730 self.ui.debug(_(" %s: searching for copy revision for %s\n") %
717 (fn, cf))
731 (fn, cf))
718 for a in self['.'].ancestors():
732 for a in self['.'].ancestors():
719 if cf in a:
733 if cf in a:
720 cr = a[cf].filenode()
734 cr = a[cf].filenode()
721 break
735 break
722
736
723 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
737 self.ui.debug(_(" %s: copy %s:%s\n") % (fn, cf, hex(cr)))
724 meta["copy"] = cf
738 meta["copy"] = cf
725 meta["copyrev"] = hex(cr)
739 meta["copyrev"] = hex(cr)
726 fp1, fp2 = nullid, nfp
740 fp1, fp2 = nullid, nfp
727 elif fp2 != nullid:
741 elif fp2 != nullid:
728 # is one parent an ancestor of the other?
742 # is one parent an ancestor of the other?
729 fpa = fl.ancestor(fp1, fp2)
743 fpa = fl.ancestor(fp1, fp2)
730 if fpa == fp1:
744 if fpa == fp1:
731 fp1, fp2 = fp2, nullid
745 fp1, fp2 = fp2, nullid
732 elif fpa == fp2:
746 elif fpa == fp2:
733 fp2 = nullid
747 fp2 = nullid
734
748
735 # is the file unmodified from the parent? report existing entry
749 # is the file unmodified from the parent? report existing entry
736 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
750 if fp2 == nullid and not fl.cmp(fp1, t) and not meta:
737 return fp1
751 return fp1
738
752
739 changelist.append(fn)
753 changelist.append(fn)
740 return fl.add(t, meta, tr, linkrev, fp1, fp2)
754 return fl.add(t, meta, tr, linkrev, fp1, fp2)
741
755
742 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
756 def rawcommit(self, files, text, user, date, p1=None, p2=None, extra={}):
743 if p1 is None:
757 if p1 is None:
744 p1, p2 = self.dirstate.parents()
758 p1, p2 = self.dirstate.parents()
745 return self.commit(files=files, text=text, user=user, date=date,
759 return self.commit(files=files, text=text, user=user, date=date,
746 p1=p1, p2=p2, extra=extra, empty_ok=True)
760 p1=p1, p2=p2, extra=extra, empty_ok=True)
747
761
748 def commit(self, files=None, text="", user=None, date=None,
762 def commit(self, files=None, text="", user=None, date=None,
749 match=None, force=False, force_editor=False,
763 match=None, force=False, force_editor=False,
750 p1=None, p2=None, extra={}, empty_ok=False):
764 p1=None, p2=None, extra={}, empty_ok=False):
751 wlock = lock = None
765 wlock = lock = None
752 if files:
766 if files:
753 files = util.unique(files)
767 files = util.unique(files)
754 try:
768 try:
755 wlock = self.wlock()
769 wlock = self.wlock()
756 lock = self.lock()
770 lock = self.lock()
757 use_dirstate = (p1 is None) # not rawcommit
771 use_dirstate = (p1 is None) # not rawcommit
758
772
759 if use_dirstate:
773 if use_dirstate:
760 p1, p2 = self.dirstate.parents()
774 p1, p2 = self.dirstate.parents()
761 update_dirstate = True
775 update_dirstate = True
762
776
763 if (not force and p2 != nullid and
777 if (not force and p2 != nullid and
764 (match and (match.files() or match.anypats()))):
778 (match and (match.files() or match.anypats()))):
765 raise util.Abort(_('cannot partially commit a merge '
779 raise util.Abort(_('cannot partially commit a merge '
766 '(do not specify files or patterns)'))
780 '(do not specify files or patterns)'))
767
781
768 if files:
782 if files:
769 modified, removed = [], []
783 modified, removed = [], []
770 for f in files:
784 for f in files:
771 s = self.dirstate[f]
785 s = self.dirstate[f]
772 if s in 'nma':
786 if s in 'nma':
773 modified.append(f)
787 modified.append(f)
774 elif s == 'r':
788 elif s == 'r':
775 removed.append(f)
789 removed.append(f)
776 else:
790 else:
777 self.ui.warn(_("%s not tracked!\n") % f)
791 self.ui.warn(_("%s not tracked!\n") % f)
778 changes = [modified, [], removed, [], []]
792 changes = [modified, [], removed, [], []]
779 else:
793 else:
780 changes = self.status(match=match)
794 changes = self.status(match=match)
781 else:
795 else:
782 p1, p2 = p1, p2 or nullid
796 p1, p2 = p1, p2 or nullid
783 update_dirstate = (self.dirstate.parents()[0] == p1)
797 update_dirstate = (self.dirstate.parents()[0] == p1)
784 changes = [files, [], [], [], []]
798 changes = [files, [], [], [], []]
785
799
786 ms = merge_.mergestate(self)
800 ms = merge_.mergestate(self)
787 for f in changes[0]:
801 for f in changes[0]:
788 if f in ms and ms[f] == 'u':
802 if f in ms and ms[f] == 'u':
789 raise util.Abort(_("unresolved merge conflicts "
803 raise util.Abort(_("unresolved merge conflicts "
790 "(see hg resolve)"))
804 "(see hg resolve)"))
791 wctx = context.workingctx(self, (p1, p2), text, user, date,
805 wctx = context.workingctx(self, (p1, p2), text, user, date,
792 extra, changes)
806 extra, changes)
793 return self._commitctx(wctx, force, force_editor, empty_ok,
807 return self._commitctx(wctx, force, force_editor, empty_ok,
794 use_dirstate, update_dirstate)
808 use_dirstate, update_dirstate)
795 finally:
809 finally:
796 del lock, wlock
810 del lock, wlock
797
811
798 def commitctx(self, ctx):
812 def commitctx(self, ctx):
799 """Add a new revision to current repository.
813 """Add a new revision to current repository.
800
814
801 Revision information is passed in the context.memctx argument.
815 Revision information is passed in the context.memctx argument.
802 commitctx() does not touch the working directory.
816 commitctx() does not touch the working directory.
803 """
817 """
804 wlock = lock = None
818 wlock = lock = None
805 try:
819 try:
806 wlock = self.wlock()
820 wlock = self.wlock()
807 lock = self.lock()
821 lock = self.lock()
808 return self._commitctx(ctx, force=True, force_editor=False,
822 return self._commitctx(ctx, force=True, force_editor=False,
809 empty_ok=True, use_dirstate=False,
823 empty_ok=True, use_dirstate=False,
810 update_dirstate=False)
824 update_dirstate=False)
811 finally:
825 finally:
812 del lock, wlock
826 del lock, wlock
813
827
814 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
828 def _commitctx(self, wctx, force=False, force_editor=False, empty_ok=False,
815 use_dirstate=True, update_dirstate=True):
829 use_dirstate=True, update_dirstate=True):
816 tr = None
830 tr = None
817 valid = 0 # don't save the dirstate if this isn't set
831 valid = 0 # don't save the dirstate if this isn't set
818 try:
832 try:
819 commit = util.sort(wctx.modified() + wctx.added())
833 commit = util.sort(wctx.modified() + wctx.added())
820 remove = wctx.removed()
834 remove = wctx.removed()
821 extra = wctx.extra().copy()
835 extra = wctx.extra().copy()
822 branchname = extra['branch']
836 branchname = extra['branch']
823 user = wctx.user()
837 user = wctx.user()
824 text = wctx.description()
838 text = wctx.description()
825
839
826 p1, p2 = [p.node() for p in wctx.parents()]
840 p1, p2 = [p.node() for p in wctx.parents()]
827 c1 = self.changelog.read(p1)
841 c1 = self.changelog.read(p1)
828 c2 = self.changelog.read(p2)
842 c2 = self.changelog.read(p2)
829 m1 = self.manifest.read(c1[0]).copy()
843 m1 = self.manifest.read(c1[0]).copy()
830 m2 = self.manifest.read(c2[0])
844 m2 = self.manifest.read(c2[0])
831
845
832 if use_dirstate:
846 if use_dirstate:
833 oldname = c1[5].get("branch") # stored in UTF-8
847 oldname = c1[5].get("branch") # stored in UTF-8
834 if (not commit and not remove and not force and p2 == nullid
848 if (not commit and not remove and not force and p2 == nullid
835 and branchname == oldname):
849 and branchname == oldname):
836 self.ui.status(_("nothing changed\n"))
850 self.ui.status(_("nothing changed\n"))
837 return None
851 return None
838
852
839 xp1 = hex(p1)
853 xp1 = hex(p1)
840 if p2 == nullid: xp2 = ''
854 if p2 == nullid: xp2 = ''
841 else: xp2 = hex(p2)
855 else: xp2 = hex(p2)
842
856
843 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
857 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
844
858
845 tr = self.transaction()
859 tr = self.transaction()
846 trp = weakref.proxy(tr)
860 trp = weakref.proxy(tr)
847
861
848 # check in files
862 # check in files
849 new = {}
863 new = {}
850 changed = []
864 changed = []
851 linkrev = len(self)
865 linkrev = len(self)
852 for f in commit:
866 for f in commit:
853 self.ui.note(f + "\n")
867 self.ui.note(f + "\n")
854 try:
868 try:
855 fctx = wctx.filectx(f)
869 fctx = wctx.filectx(f)
856 newflags = fctx.flags()
870 newflags = fctx.flags()
857 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
871 new[f] = self.filecommit(fctx, m1, m2, linkrev, trp, changed)
858 if ((not changed or changed[-1] != f) and
872 if ((not changed or changed[-1] != f) and
859 m2.get(f) != new[f]):
873 m2.get(f) != new[f]):
860 # mention the file in the changelog if some
874 # mention the file in the changelog if some
861 # flag changed, even if there was no content
875 # flag changed, even if there was no content
862 # change.
876 # change.
863 if m1.flags(f) != newflags:
877 if m1.flags(f) != newflags:
864 changed.append(f)
878 changed.append(f)
865 m1.set(f, newflags)
879 m1.set(f, newflags)
866 if use_dirstate:
880 if use_dirstate:
867 self.dirstate.normal(f)
881 self.dirstate.normal(f)
868
882
869 except (OSError, IOError):
883 except (OSError, IOError):
870 if use_dirstate:
884 if use_dirstate:
871 self.ui.warn(_("trouble committing %s!\n") % f)
885 self.ui.warn(_("trouble committing %s!\n") % f)
872 raise
886 raise
873 else:
887 else:
874 remove.append(f)
888 remove.append(f)
875
889
876 updated, added = [], []
890 updated, added = [], []
877 for f in util.sort(changed):
891 for f in util.sort(changed):
878 if f in m1 or f in m2:
892 if f in m1 or f in m2:
879 updated.append(f)
893 updated.append(f)
880 else:
894 else:
881 added.append(f)
895 added.append(f)
882
896
883 # update manifest
897 # update manifest
884 m1.update(new)
898 m1.update(new)
885 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
899 removed = [f for f in util.sort(remove) if f in m1 or f in m2]
886 removed1 = []
900 removed1 = []
887
901
888 for f in removed:
902 for f in removed:
889 if f in m1:
903 if f in m1:
890 del m1[f]
904 del m1[f]
891 removed1.append(f)
905 removed1.append(f)
892 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
906 mn = self.manifest.add(m1, trp, linkrev, c1[0], c2[0],
893 (new, removed1))
907 (new, removed1))
894
908
895 # add changeset
909 # add changeset
896 if (not empty_ok and not text) or force_editor:
910 if (not empty_ok and not text) or force_editor:
897 edittext = []
911 edittext = []
898 if text:
912 if text:
899 edittext.append(text)
913 edittext.append(text)
900 edittext.append("")
914 edittext.append("")
901 edittext.append("") # Empty line between message and comments.
915 edittext.append("") # Empty line between message and comments.
902 edittext.append(_("HG: Enter commit message."
916 edittext.append(_("HG: Enter commit message."
903 " Lines beginning with 'HG:' are removed."))
917 " Lines beginning with 'HG:' are removed."))
904 edittext.append("HG: --")
918 edittext.append("HG: --")
905 edittext.append("HG: user: %s" % user)
919 edittext.append("HG: user: %s" % user)
906 if p2 != nullid:
920 if p2 != nullid:
907 edittext.append("HG: branch merge")
921 edittext.append("HG: branch merge")
908 if branchname:
922 if branchname:
909 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
923 edittext.append("HG: branch '%s'" % util.tolocal(branchname))
910 edittext.extend(["HG: added %s" % f for f in added])
924 edittext.extend(["HG: added %s" % f for f in added])
911 edittext.extend(["HG: changed %s" % f for f in updated])
925 edittext.extend(["HG: changed %s" % f for f in updated])
912 edittext.extend(["HG: removed %s" % f for f in removed])
926 edittext.extend(["HG: removed %s" % f for f in removed])
913 if not added and not updated and not removed:
927 if not added and not updated and not removed:
914 edittext.append("HG: no files changed")
928 edittext.append("HG: no files changed")
915 edittext.append("")
929 edittext.append("")
916 # run editor in the repository root
930 # run editor in the repository root
917 olddir = os.getcwd()
931 olddir = os.getcwd()
918 os.chdir(self.root)
932 os.chdir(self.root)
919 text = self.ui.edit("\n".join(edittext), user)
933 text = self.ui.edit("\n".join(edittext), user)
920 os.chdir(olddir)
934 os.chdir(olddir)
921
935
922 lines = [line.rstrip() for line in text.rstrip().splitlines()]
936 lines = [line.rstrip() for line in text.rstrip().splitlines()]
923 while lines and not lines[0]:
937 while lines and not lines[0]:
924 del lines[0]
938 del lines[0]
925 if not lines and use_dirstate:
939 if not lines and use_dirstate:
926 raise util.Abort(_("empty commit message"))
940 raise util.Abort(_("empty commit message"))
927 text = '\n'.join(lines)
941 text = '\n'.join(lines)
928
942
929 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
943 n = self.changelog.add(mn, changed + removed, text, trp, p1, p2,
930 user, wctx.date(), extra)
944 user, wctx.date(), extra)
931 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
945 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
932 parent2=xp2)
946 parent2=xp2)
933 tr.close()
947 tr.close()
934
948
935 if self.branchcache:
949 if self.branchcache:
936 self.branchtags()
950 self.branchtags()
937
951
938 if use_dirstate or update_dirstate:
952 if use_dirstate or update_dirstate:
939 self.dirstate.setparents(n)
953 self.dirstate.setparents(n)
940 if use_dirstate:
954 if use_dirstate:
941 for f in removed:
955 for f in removed:
942 self.dirstate.forget(f)
956 self.dirstate.forget(f)
943 valid = 1 # our dirstate updates are complete
957 valid = 1 # our dirstate updates are complete
944
958
945 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
959 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
946 return n
960 return n
947 finally:
961 finally:
948 if not valid: # don't save our updated dirstate
962 if not valid: # don't save our updated dirstate
949 self.dirstate.invalidate()
963 self.dirstate.invalidate()
950 del tr
964 del tr
951
965
952 def walk(self, match, node=None):
966 def walk(self, match, node=None):
953 '''
967 '''
954 walk recursively through the directory tree or a given
968 walk recursively through the directory tree or a given
955 changeset, finding all files matched by the match
969 changeset, finding all files matched by the match
956 function
970 function
957 '''
971 '''
958 return self[node].walk(match)
972 return self[node].walk(match)
959
973
960 def status(self, node1='.', node2=None, match=None,
974 def status(self, node1='.', node2=None, match=None,
961 ignored=False, clean=False, unknown=False):
975 ignored=False, clean=False, unknown=False):
962 """return status of files between two nodes or node and working directory
976 """return status of files between two nodes or node and working directory
963
977
964 If node1 is None, use the first dirstate parent instead.
978 If node1 is None, use the first dirstate parent instead.
965 If node2 is None, compare node1 with working directory.
979 If node2 is None, compare node1 with working directory.
966 """
980 """
967
981
968 def mfmatches(ctx):
982 def mfmatches(ctx):
969 mf = ctx.manifest().copy()
983 mf = ctx.manifest().copy()
970 for fn in mf.keys():
984 for fn in mf.keys():
971 if not match(fn):
985 if not match(fn):
972 del mf[fn]
986 del mf[fn]
973 return mf
987 return mf
974
988
975 if isinstance(node1, context.changectx):
989 if isinstance(node1, context.changectx):
976 ctx1 = node1
990 ctx1 = node1
977 else:
991 else:
978 ctx1 = self[node1]
992 ctx1 = self[node1]
979 if isinstance(node2, context.changectx):
993 if isinstance(node2, context.changectx):
980 ctx2 = node2
994 ctx2 = node2
981 else:
995 else:
982 ctx2 = self[node2]
996 ctx2 = self[node2]
983
997
984 working = ctx2.rev() is None
998 working = ctx2.rev() is None
985 parentworking = working and ctx1 == self['.']
999 parentworking = working and ctx1 == self['.']
986 match = match or match_.always(self.root, self.getcwd())
1000 match = match or match_.always(self.root, self.getcwd())
987 listignored, listclean, listunknown = ignored, clean, unknown
1001 listignored, listclean, listunknown = ignored, clean, unknown
988
1002
989 # load earliest manifest first for caching reasons
1003 # load earliest manifest first for caching reasons
990 if not working and ctx2.rev() < ctx1.rev():
1004 if not working and ctx2.rev() < ctx1.rev():
991 ctx2.manifest()
1005 ctx2.manifest()
992
1006
993 if not parentworking:
1007 if not parentworking:
994 def bad(f, msg):
1008 def bad(f, msg):
995 if f not in ctx1:
1009 if f not in ctx1:
996 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
1010 self.ui.warn('%s: %s\n' % (self.dirstate.pathto(f), msg))
997 return False
1011 return False
998 match.bad = bad
1012 match.bad = bad
999
1013
1000 if working: # we need to scan the working dir
1014 if working: # we need to scan the working dir
1001 s = self.dirstate.status(match, listignored, listclean, listunknown)
1015 s = self.dirstate.status(match, listignored, listclean, listunknown)
1002 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1016 cmp, modified, added, removed, deleted, unknown, ignored, clean = s
1003
1017
1004 # check for any possibly clean files
1018 # check for any possibly clean files
1005 if parentworking and cmp:
1019 if parentworking and cmp:
1006 fixup = []
1020 fixup = []
1007 # do a full compare of any files that might have changed
1021 # do a full compare of any files that might have changed
1008 for f in cmp:
1022 for f in cmp:
1009 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1023 if (f not in ctx1 or ctx2.flags(f) != ctx1.flags(f)
1010 or ctx1[f].cmp(ctx2[f].data())):
1024 or ctx1[f].cmp(ctx2[f].data())):
1011 modified.append(f)
1025 modified.append(f)
1012 else:
1026 else:
1013 fixup.append(f)
1027 fixup.append(f)
1014
1028
1015 if listclean:
1029 if listclean:
1016 clean += fixup
1030 clean += fixup
1017
1031
1018 # update dirstate for files that are actually clean
1032 # update dirstate for files that are actually clean
1019 if fixup:
1033 if fixup:
1020 wlock = None
1034 wlock = None
1021 try:
1035 try:
1022 try:
1036 try:
1023 wlock = self.wlock(False)
1037 wlock = self.wlock(False)
1024 for f in fixup:
1038 for f in fixup:
1025 self.dirstate.normal(f)
1039 self.dirstate.normal(f)
1026 except lock.LockError:
1040 except lock.LockError:
1027 pass
1041 pass
1028 finally:
1042 finally:
1029 del wlock
1043 del wlock
1030
1044
1031 if not parentworking:
1045 if not parentworking:
1032 mf1 = mfmatches(ctx1)
1046 mf1 = mfmatches(ctx1)
1033 if working:
1047 if working:
1034 # we are comparing working dir against non-parent
1048 # we are comparing working dir against non-parent
1035 # generate a pseudo-manifest for the working dir
1049 # generate a pseudo-manifest for the working dir
1036 mf2 = mfmatches(self['.'])
1050 mf2 = mfmatches(self['.'])
1037 for f in cmp + modified + added:
1051 for f in cmp + modified + added:
1038 mf2[f] = None
1052 mf2[f] = None
1039 mf2.set(f, ctx2.flags(f))
1053 mf2.set(f, ctx2.flags(f))
1040 for f in removed:
1054 for f in removed:
1041 if f in mf2:
1055 if f in mf2:
1042 del mf2[f]
1056 del mf2[f]
1043 else:
1057 else:
1044 # we are comparing two revisions
1058 # we are comparing two revisions
1045 deleted, unknown, ignored = [], [], []
1059 deleted, unknown, ignored = [], [], []
1046 mf2 = mfmatches(ctx2)
1060 mf2 = mfmatches(ctx2)
1047
1061
1048 modified, added, clean = [], [], []
1062 modified, added, clean = [], [], []
1049 for fn in mf2:
1063 for fn in mf2:
1050 if fn in mf1:
1064 if fn in mf1:
1051 if (mf1.flags(fn) != mf2.flags(fn) or
1065 if (mf1.flags(fn) != mf2.flags(fn) or
1052 (mf1[fn] != mf2[fn] and
1066 (mf1[fn] != mf2[fn] and
1053 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1067 (mf2[fn] or ctx1[fn].cmp(ctx2[fn].data())))):
1054 modified.append(fn)
1068 modified.append(fn)
1055 elif listclean:
1069 elif listclean:
1056 clean.append(fn)
1070 clean.append(fn)
1057 del mf1[fn]
1071 del mf1[fn]
1058 else:
1072 else:
1059 added.append(fn)
1073 added.append(fn)
1060 removed = mf1.keys()
1074 removed = mf1.keys()
1061
1075
1062 r = modified, added, removed, deleted, unknown, ignored, clean
1076 r = modified, added, removed, deleted, unknown, ignored, clean
1063 [l.sort() for l in r]
1077 [l.sort() for l in r]
1064 return r
1078 return r
1065
1079
1066 def add(self, list):
1080 def add(self, list):
1067 wlock = self.wlock()
1081 wlock = self.wlock()
1068 try:
1082 try:
1069 rejected = []
1083 rejected = []
1070 for f in list:
1084 for f in list:
1071 p = self.wjoin(f)
1085 p = self.wjoin(f)
1072 try:
1086 try:
1073 st = os.lstat(p)
1087 st = os.lstat(p)
1074 except:
1088 except:
1075 self.ui.warn(_("%s does not exist!\n") % f)
1089 self.ui.warn(_("%s does not exist!\n") % f)
1076 rejected.append(f)
1090 rejected.append(f)
1077 continue
1091 continue
1078 if st.st_size > 10000000:
1092 if st.st_size > 10000000:
1079 self.ui.warn(_("%s: files over 10MB may cause memory and"
1093 self.ui.warn(_("%s: files over 10MB may cause memory and"
1080 " performance problems\n"
1094 " performance problems\n"
1081 "(use 'hg revert %s' to unadd the file)\n")
1095 "(use 'hg revert %s' to unadd the file)\n")
1082 % (f, f))
1096 % (f, f))
1083 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1097 if not (stat.S_ISREG(st.st_mode) or stat.S_ISLNK(st.st_mode)):
1084 self.ui.warn(_("%s not added: only files and symlinks "
1098 self.ui.warn(_("%s not added: only files and symlinks "
1085 "supported currently\n") % f)
1099 "supported currently\n") % f)
1086 rejected.append(p)
1100 rejected.append(p)
1087 elif self.dirstate[f] in 'amn':
1101 elif self.dirstate[f] in 'amn':
1088 self.ui.warn(_("%s already tracked!\n") % f)
1102 self.ui.warn(_("%s already tracked!\n") % f)
1089 elif self.dirstate[f] == 'r':
1103 elif self.dirstate[f] == 'r':
1090 self.dirstate.normallookup(f)
1104 self.dirstate.normallookup(f)
1091 else:
1105 else:
1092 self.dirstate.add(f)
1106 self.dirstate.add(f)
1093 return rejected
1107 return rejected
1094 finally:
1108 finally:
1095 del wlock
1109 del wlock
1096
1110
1097 def forget(self, list):
1111 def forget(self, list):
1098 wlock = self.wlock()
1112 wlock = self.wlock()
1099 try:
1113 try:
1100 for f in list:
1114 for f in list:
1101 if self.dirstate[f] != 'a':
1115 if self.dirstate[f] != 'a':
1102 self.ui.warn(_("%s not added!\n") % f)
1116 self.ui.warn(_("%s not added!\n") % f)
1103 else:
1117 else:
1104 self.dirstate.forget(f)
1118 self.dirstate.forget(f)
1105 finally:
1119 finally:
1106 del wlock
1120 del wlock
1107
1121
1108 def remove(self, list, unlink=False):
1122 def remove(self, list, unlink=False):
1109 wlock = None
1123 wlock = None
1110 try:
1124 try:
1111 if unlink:
1125 if unlink:
1112 for f in list:
1126 for f in list:
1113 try:
1127 try:
1114 util.unlink(self.wjoin(f))
1128 util.unlink(self.wjoin(f))
1115 except OSError, inst:
1129 except OSError, inst:
1116 if inst.errno != errno.ENOENT:
1130 if inst.errno != errno.ENOENT:
1117 raise
1131 raise
1118 wlock = self.wlock()
1132 wlock = self.wlock()
1119 for f in list:
1133 for f in list:
1120 if unlink and os.path.exists(self.wjoin(f)):
1134 if unlink and os.path.exists(self.wjoin(f)):
1121 self.ui.warn(_("%s still exists!\n") % f)
1135 self.ui.warn(_("%s still exists!\n") % f)
1122 elif self.dirstate[f] == 'a':
1136 elif self.dirstate[f] == 'a':
1123 self.dirstate.forget(f)
1137 self.dirstate.forget(f)
1124 elif f not in self.dirstate:
1138 elif f not in self.dirstate:
1125 self.ui.warn(_("%s not tracked!\n") % f)
1139 self.ui.warn(_("%s not tracked!\n") % f)
1126 else:
1140 else:
1127 self.dirstate.remove(f)
1141 self.dirstate.remove(f)
1128 finally:
1142 finally:
1129 del wlock
1143 del wlock
1130
1144
1131 def undelete(self, list):
1145 def undelete(self, list):
1132 wlock = None
1146 wlock = None
1133 try:
1147 try:
1134 manifests = [self.manifest.read(self.changelog.read(p)[0])
1148 manifests = [self.manifest.read(self.changelog.read(p)[0])
1135 for p in self.dirstate.parents() if p != nullid]
1149 for p in self.dirstate.parents() if p != nullid]
1136 wlock = self.wlock()
1150 wlock = self.wlock()
1137 for f in list:
1151 for f in list:
1138 if self.dirstate[f] != 'r':
1152 if self.dirstate[f] != 'r':
1139 self.ui.warn(_("%s not removed!\n") % f)
1153 self.ui.warn(_("%s not removed!\n") % f)
1140 else:
1154 else:
1141 m = f in manifests[0] and manifests[0] or manifests[1]
1155 m = f in manifests[0] and manifests[0] or manifests[1]
1142 t = self.file(f).read(m[f])
1156 t = self.file(f).read(m[f])
1143 self.wwrite(f, t, m.flags(f))
1157 self.wwrite(f, t, m.flags(f))
1144 self.dirstate.normal(f)
1158 self.dirstate.normal(f)
1145 finally:
1159 finally:
1146 del wlock
1160 del wlock
1147
1161
1148 def copy(self, source, dest):
1162 def copy(self, source, dest):
1149 wlock = None
1163 wlock = None
1150 try:
1164 try:
1151 p = self.wjoin(dest)
1165 p = self.wjoin(dest)
1152 if not (os.path.exists(p) or os.path.islink(p)):
1166 if not (os.path.exists(p) or os.path.islink(p)):
1153 self.ui.warn(_("%s does not exist!\n") % dest)
1167 self.ui.warn(_("%s does not exist!\n") % dest)
1154 elif not (os.path.isfile(p) or os.path.islink(p)):
1168 elif not (os.path.isfile(p) or os.path.islink(p)):
1155 self.ui.warn(_("copy failed: %s is not a file or a "
1169 self.ui.warn(_("copy failed: %s is not a file or a "
1156 "symbolic link\n") % dest)
1170 "symbolic link\n") % dest)
1157 else:
1171 else:
1158 wlock = self.wlock()
1172 wlock = self.wlock()
1159 if self.dirstate[dest] in '?r':
1173 if self.dirstate[dest] in '?r':
1160 self.dirstate.add(dest)
1174 self.dirstate.add(dest)
1161 self.dirstate.copy(source, dest)
1175 self.dirstate.copy(source, dest)
1162 finally:
1176 finally:
1163 del wlock
1177 del wlock
1164
1178
1165 def heads(self, start=None):
1179 def heads(self, start=None):
1166 heads = self.changelog.heads(start)
1180 heads = self.changelog.heads(start)
1167 # sort the output in rev descending order
1181 # sort the output in rev descending order
1168 heads = [(-self.changelog.rev(h), h) for h in heads]
1182 heads = [(-self.changelog.rev(h), h) for h in heads]
1169 return [n for (r, n) in util.sort(heads)]
1183 return [n for (r, n) in util.sort(heads)]
1170
1184
1171 def branchheads(self, branch=None, start=None):
1185 def branchheads(self, branch=None, start=None):
1172 if branch is None:
1186 if branch is None:
1173 branch = self[None].branch()
1187 branch = self[None].branch()
1174 branches = self.branchtags()
1188 branches = self._branchheads()
1175 if branch not in branches:
1189 if branch not in branches:
1176 return []
1190 return []
1177 # The basic algorithm is this:
1191 bheads = branches[branch]
1178 #
1192 # the cache returns heads ordered lowest to highest
1179 # Start from the branch tip since there are no later revisions that can
1193 bheads.reverse()
1180 # possibly be in this branch, and the tip is a guaranteed head.
1181 #
1182 # Remember the tip's parents as the first ancestors, since these by
1183 # definition are not heads.
1184 #
1185 # Step backwards from the brach tip through all the revisions. We are
1186 # guaranteed by the rules of Mercurial that we will now be visiting the
1187 # nodes in reverse topological order (children before parents).
1188 #
1189 # If a revision is one of the ancestors of a head then we can toss it
1190 # out of the ancestors set (we've already found it and won't be
1191 # visiting it again) and put its parents in the ancestors set.
1192 #
1193 # Otherwise, if a revision is in the branch it's another head, since it
1194 # wasn't in the ancestor list of an existing head. So add it to the
1195 # head list, and add its parents to the ancestor list.
1196 #
1197 # If it is not in the branch ignore it.
1198 #
1199 # Once we have a list of heads, use nodesbetween to filter out all the
1200 # heads that cannot be reached from startrev. There may be a more
1201 # efficient way to do this as part of the previous algorithm.
1202
1203 set = util.set
1204 heads = [self.changelog.rev(branches[branch])]
1205 # Don't care if ancestors contains nullrev or not.
1206 ancestors = set(self.changelog.parentrevs(heads[0]))
1207 for rev in xrange(heads[0] - 1, nullrev, -1):
1208 if rev in ancestors:
1209 ancestors.update(self.changelog.parentrevs(rev))
1210 ancestors.remove(rev)
1211 elif self[rev].branch() == branch:
1212 heads.append(rev)
1213 ancestors.update(self.changelog.parentrevs(rev))
1214 heads = [self.changelog.node(rev) for rev in heads]
1215 if start is not None:
1194 if start is not None:
1216 heads = self.changelog.nodesbetween([start], heads)[2]
1195 # filter out the heads that cannot be reached from startrev
1217 return heads
1196 bheads = self.changelog.nodesbetween([start], bheads)[2]
1197 return bheads
1218
1198
1219 def branches(self, nodes):
1199 def branches(self, nodes):
1220 if not nodes:
1200 if not nodes:
1221 nodes = [self.changelog.tip()]
1201 nodes = [self.changelog.tip()]
1222 b = []
1202 b = []
1223 for n in nodes:
1203 for n in nodes:
1224 t = n
1204 t = n
1225 while 1:
1205 while 1:
1226 p = self.changelog.parents(n)
1206 p = self.changelog.parents(n)
1227 if p[1] != nullid or p[0] == nullid:
1207 if p[1] != nullid or p[0] == nullid:
1228 b.append((t, n, p[0], p[1]))
1208 b.append((t, n, p[0], p[1]))
1229 break
1209 break
1230 n = p[0]
1210 n = p[0]
1231 return b
1211 return b
1232
1212
1233 def between(self, pairs):
1213 def between(self, pairs):
1234 r = []
1214 r = []
1235
1215
1236 for top, bottom in pairs:
1216 for top, bottom in pairs:
1237 n, l, i = top, [], 0
1217 n, l, i = top, [], 0
1238 f = 1
1218 f = 1
1239
1219
1240 while n != bottom:
1220 while n != bottom:
1241 p = self.changelog.parents(n)[0]
1221 p = self.changelog.parents(n)[0]
1242 if i == f:
1222 if i == f:
1243 l.append(n)
1223 l.append(n)
1244 f = f * 2
1224 f = f * 2
1245 n = p
1225 n = p
1246 i += 1
1226 i += 1
1247
1227
1248 r.append(l)
1228 r.append(l)
1249
1229
1250 return r
1230 return r
1251
1231
1252 def findincoming(self, remote, base=None, heads=None, force=False):
1232 def findincoming(self, remote, base=None, heads=None, force=False):
1253 """Return list of roots of the subsets of missing nodes from remote
1233 """Return list of roots of the subsets of missing nodes from remote
1254
1234
1255 If base dict is specified, assume that these nodes and their parents
1235 If base dict is specified, assume that these nodes and their parents
1256 exist on the remote side and that no child of a node of base exists
1236 exist on the remote side and that no child of a node of base exists
1257 in both remote and self.
1237 in both remote and self.
1258 Furthermore base will be updated to include the nodes that exists
1238 Furthermore base will be updated to include the nodes that exists
1259 in self and remote but no children exists in self and remote.
1239 in self and remote but no children exists in self and remote.
1260 If a list of heads is specified, return only nodes which are heads
1240 If a list of heads is specified, return only nodes which are heads
1261 or ancestors of these heads.
1241 or ancestors of these heads.
1262
1242
1263 All the ancestors of base are in self and in remote.
1243 All the ancestors of base are in self and in remote.
1264 All the descendants of the list returned are missing in self.
1244 All the descendants of the list returned are missing in self.
1265 (and so we know that the rest of the nodes are missing in remote, see
1245 (and so we know that the rest of the nodes are missing in remote, see
1266 outgoing)
1246 outgoing)
1267 """
1247 """
1268 return self.findcommonincoming(remote, base, heads, force)[1]
1248 return self.findcommonincoming(remote, base, heads, force)[1]
1269
1249
1270 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1250 def findcommonincoming(self, remote, base=None, heads=None, force=False):
1271 """Return a tuple (common, missing roots, heads) used to identify
1251 """Return a tuple (common, missing roots, heads) used to identify
1272 missing nodes from remote.
1252 missing nodes from remote.
1273
1253
1274 If base dict is specified, assume that these nodes and their parents
1254 If base dict is specified, assume that these nodes and their parents
1275 exist on the remote side and that no child of a node of base exists
1255 exist on the remote side and that no child of a node of base exists
1276 in both remote and self.
1256 in both remote and self.
1277 Furthermore base will be updated to include the nodes that exists
1257 Furthermore base will be updated to include the nodes that exists
1278 in self and remote but no children exists in self and remote.
1258 in self and remote but no children exists in self and remote.
1279 If a list of heads is specified, return only nodes which are heads
1259 If a list of heads is specified, return only nodes which are heads
1280 or ancestors of these heads.
1260 or ancestors of these heads.
1281
1261
1282 All the ancestors of base are in self and in remote.
1262 All the ancestors of base are in self and in remote.
1283 """
1263 """
1284 m = self.changelog.nodemap
1264 m = self.changelog.nodemap
1285 search = []
1265 search = []
1286 fetch = {}
1266 fetch = {}
1287 seen = {}
1267 seen = {}
1288 seenbranch = {}
1268 seenbranch = {}
1289 if base == None:
1269 if base == None:
1290 base = {}
1270 base = {}
1291
1271
1292 if not heads:
1272 if not heads:
1293 heads = remote.heads()
1273 heads = remote.heads()
1294
1274
1295 if self.changelog.tip() == nullid:
1275 if self.changelog.tip() == nullid:
1296 base[nullid] = 1
1276 base[nullid] = 1
1297 if heads != [nullid]:
1277 if heads != [nullid]:
1298 return [nullid], [nullid], list(heads)
1278 return [nullid], [nullid], list(heads)
1299 return [nullid], [], []
1279 return [nullid], [], []
1300
1280
1301 # assume we're closer to the tip than the root
1281 # assume we're closer to the tip than the root
1302 # and start by examining the heads
1282 # and start by examining the heads
1303 self.ui.status(_("searching for changes\n"))
1283 self.ui.status(_("searching for changes\n"))
1304
1284
1305 unknown = []
1285 unknown = []
1306 for h in heads:
1286 for h in heads:
1307 if h not in m:
1287 if h not in m:
1308 unknown.append(h)
1288 unknown.append(h)
1309 else:
1289 else:
1310 base[h] = 1
1290 base[h] = 1
1311
1291
1312 heads = unknown
1292 heads = unknown
1313 if not unknown:
1293 if not unknown:
1314 return base.keys(), [], []
1294 return base.keys(), [], []
1315
1295
1316 req = dict.fromkeys(unknown)
1296 req = dict.fromkeys(unknown)
1317 reqcnt = 0
1297 reqcnt = 0
1318
1298
1319 # search through remote branches
1299 # search through remote branches
1320 # a 'branch' here is a linear segment of history, with four parts:
1300 # a 'branch' here is a linear segment of history, with four parts:
1321 # head, root, first parent, second parent
1301 # head, root, first parent, second parent
1322 # (a branch always has two parents (or none) by definition)
1302 # (a branch always has two parents (or none) by definition)
1323 unknown = remote.branches(unknown)
1303 unknown = remote.branches(unknown)
1324 while unknown:
1304 while unknown:
1325 r = []
1305 r = []
1326 while unknown:
1306 while unknown:
1327 n = unknown.pop(0)
1307 n = unknown.pop(0)
1328 if n[0] in seen:
1308 if n[0] in seen:
1329 continue
1309 continue
1330
1310
1331 self.ui.debug(_("examining %s:%s\n")
1311 self.ui.debug(_("examining %s:%s\n")
1332 % (short(n[0]), short(n[1])))
1312 % (short(n[0]), short(n[1])))
1333 if n[0] == nullid: # found the end of the branch
1313 if n[0] == nullid: # found the end of the branch
1334 pass
1314 pass
1335 elif n in seenbranch:
1315 elif n in seenbranch:
1336 self.ui.debug(_("branch already found\n"))
1316 self.ui.debug(_("branch already found\n"))
1337 continue
1317 continue
1338 elif n[1] and n[1] in m: # do we know the base?
1318 elif n[1] and n[1] in m: # do we know the base?
1339 self.ui.debug(_("found incomplete branch %s:%s\n")
1319 self.ui.debug(_("found incomplete branch %s:%s\n")
1340 % (short(n[0]), short(n[1])))
1320 % (short(n[0]), short(n[1])))
1341 search.append(n[0:2]) # schedule branch range for scanning
1321 search.append(n[0:2]) # schedule branch range for scanning
1342 seenbranch[n] = 1
1322 seenbranch[n] = 1
1343 else:
1323 else:
1344 if n[1] not in seen and n[1] not in fetch:
1324 if n[1] not in seen and n[1] not in fetch:
1345 if n[2] in m and n[3] in m:
1325 if n[2] in m and n[3] in m:
1346 self.ui.debug(_("found new changeset %s\n") %
1326 self.ui.debug(_("found new changeset %s\n") %
1347 short(n[1]))
1327 short(n[1]))
1348 fetch[n[1]] = 1 # earliest unknown
1328 fetch[n[1]] = 1 # earliest unknown
1349 for p in n[2:4]:
1329 for p in n[2:4]:
1350 if p in m:
1330 if p in m:
1351 base[p] = 1 # latest known
1331 base[p] = 1 # latest known
1352
1332
1353 for p in n[2:4]:
1333 for p in n[2:4]:
1354 if p not in req and p not in m:
1334 if p not in req and p not in m:
1355 r.append(p)
1335 r.append(p)
1356 req[p] = 1
1336 req[p] = 1
1357 seen[n[0]] = 1
1337 seen[n[0]] = 1
1358
1338
1359 if r:
1339 if r:
1360 reqcnt += 1
1340 reqcnt += 1
1361 self.ui.debug(_("request %d: %s\n") %
1341 self.ui.debug(_("request %d: %s\n") %
1362 (reqcnt, " ".join(map(short, r))))
1342 (reqcnt, " ".join(map(short, r))))
1363 for p in xrange(0, len(r), 10):
1343 for p in xrange(0, len(r), 10):
1364 for b in remote.branches(r[p:p+10]):
1344 for b in remote.branches(r[p:p+10]):
1365 self.ui.debug(_("received %s:%s\n") %
1345 self.ui.debug(_("received %s:%s\n") %
1366 (short(b[0]), short(b[1])))
1346 (short(b[0]), short(b[1])))
1367 unknown.append(b)
1347 unknown.append(b)
1368
1348
1369 # do binary search on the branches we found
1349 # do binary search on the branches we found
1370 while search:
1350 while search:
1371 newsearch = []
1351 newsearch = []
1372 reqcnt += 1
1352 reqcnt += 1
1373 for n, l in zip(search, remote.between(search)):
1353 for n, l in zip(search, remote.between(search)):
1374 l.append(n[1])
1354 l.append(n[1])
1375 p = n[0]
1355 p = n[0]
1376 f = 1
1356 f = 1
1377 for i in l:
1357 for i in l:
1378 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1358 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
1379 if i in m:
1359 if i in m:
1380 if f <= 2:
1360 if f <= 2:
1381 self.ui.debug(_("found new branch changeset %s\n") %
1361 self.ui.debug(_("found new branch changeset %s\n") %
1382 short(p))
1362 short(p))
1383 fetch[p] = 1
1363 fetch[p] = 1
1384 base[i] = 1
1364 base[i] = 1
1385 else:
1365 else:
1386 self.ui.debug(_("narrowed branch search to %s:%s\n")
1366 self.ui.debug(_("narrowed branch search to %s:%s\n")
1387 % (short(p), short(i)))
1367 % (short(p), short(i)))
1388 newsearch.append((p, i))
1368 newsearch.append((p, i))
1389 break
1369 break
1390 p, f = i, f * 2
1370 p, f = i, f * 2
1391 search = newsearch
1371 search = newsearch
1392
1372
1393 # sanity check our fetch list
1373 # sanity check our fetch list
1394 for f in fetch.keys():
1374 for f in fetch.keys():
1395 if f in m:
1375 if f in m:
1396 raise error.RepoError(_("already have changeset ")
1376 raise error.RepoError(_("already have changeset ")
1397 + short(f[:4]))
1377 + short(f[:4]))
1398
1378
1399 if base.keys() == [nullid]:
1379 if base.keys() == [nullid]:
1400 if force:
1380 if force:
1401 self.ui.warn(_("warning: repository is unrelated\n"))
1381 self.ui.warn(_("warning: repository is unrelated\n"))
1402 else:
1382 else:
1403 raise util.Abort(_("repository is unrelated"))
1383 raise util.Abort(_("repository is unrelated"))
1404
1384
1405 self.ui.debug(_("found new changesets starting at ") +
1385 self.ui.debug(_("found new changesets starting at ") +
1406 " ".join([short(f) for f in fetch]) + "\n")
1386 " ".join([short(f) for f in fetch]) + "\n")
1407
1387
1408 self.ui.debug(_("%d total queries\n") % reqcnt)
1388 self.ui.debug(_("%d total queries\n") % reqcnt)
1409
1389
1410 return base.keys(), fetch.keys(), heads
1390 return base.keys(), fetch.keys(), heads
1411
1391
1412 def findoutgoing(self, remote, base=None, heads=None, force=False):
1392 def findoutgoing(self, remote, base=None, heads=None, force=False):
1413 """Return list of nodes that are roots of subsets not in remote
1393 """Return list of nodes that are roots of subsets not in remote
1414
1394
1415 If base dict is specified, assume that these nodes and their parents
1395 If base dict is specified, assume that these nodes and their parents
1416 exist on the remote side.
1396 exist on the remote side.
1417 If a list of heads is specified, return only nodes which are heads
1397 If a list of heads is specified, return only nodes which are heads
1418 or ancestors of these heads, and return a second element which
1398 or ancestors of these heads, and return a second element which
1419 contains all remote heads which get new children.
1399 contains all remote heads which get new children.
1420 """
1400 """
1421 if base == None:
1401 if base == None:
1422 base = {}
1402 base = {}
1423 self.findincoming(remote, base, heads, force=force)
1403 self.findincoming(remote, base, heads, force=force)
1424
1404
1425 self.ui.debug(_("common changesets up to ")
1405 self.ui.debug(_("common changesets up to ")
1426 + " ".join(map(short, base.keys())) + "\n")
1406 + " ".join(map(short, base.keys())) + "\n")
1427
1407
1428 remain = dict.fromkeys(self.changelog.nodemap)
1408 remain = dict.fromkeys(self.changelog.nodemap)
1429
1409
1430 # prune everything remote has from the tree
1410 # prune everything remote has from the tree
1431 del remain[nullid]
1411 del remain[nullid]
1432 remove = base.keys()
1412 remove = base.keys()
1433 while remove:
1413 while remove:
1434 n = remove.pop(0)
1414 n = remove.pop(0)
1435 if n in remain:
1415 if n in remain:
1436 del remain[n]
1416 del remain[n]
1437 for p in self.changelog.parents(n):
1417 for p in self.changelog.parents(n):
1438 remove.append(p)
1418 remove.append(p)
1439
1419
1440 # find every node whose parents have been pruned
1420 # find every node whose parents have been pruned
1441 subset = []
1421 subset = []
1442 # find every remote head that will get new children
1422 # find every remote head that will get new children
1443 updated_heads = {}
1423 updated_heads = {}
1444 for n in remain:
1424 for n in remain:
1445 p1, p2 = self.changelog.parents(n)
1425 p1, p2 = self.changelog.parents(n)
1446 if p1 not in remain and p2 not in remain:
1426 if p1 not in remain and p2 not in remain:
1447 subset.append(n)
1427 subset.append(n)
1448 if heads:
1428 if heads:
1449 if p1 in heads:
1429 if p1 in heads:
1450 updated_heads[p1] = True
1430 updated_heads[p1] = True
1451 if p2 in heads:
1431 if p2 in heads:
1452 updated_heads[p2] = True
1432 updated_heads[p2] = True
1453
1433
1454 # this is the set of all roots we have to push
1434 # this is the set of all roots we have to push
1455 if heads:
1435 if heads:
1456 return subset, updated_heads.keys()
1436 return subset, updated_heads.keys()
1457 else:
1437 else:
1458 return subset
1438 return subset
1459
1439
1460 def pull(self, remote, heads=None, force=False):
1440 def pull(self, remote, heads=None, force=False):
1461 lock = self.lock()
1441 lock = self.lock()
1462 try:
1442 try:
1463 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1443 common, fetch, rheads = self.findcommonincoming(remote, heads=heads,
1464 force=force)
1444 force=force)
1465 if fetch == [nullid]:
1445 if fetch == [nullid]:
1466 self.ui.status(_("requesting all changes\n"))
1446 self.ui.status(_("requesting all changes\n"))
1467
1447
1468 if not fetch:
1448 if not fetch:
1469 self.ui.status(_("no changes found\n"))
1449 self.ui.status(_("no changes found\n"))
1470 return 0
1450 return 0
1471
1451
1472 if heads is None and remote.capable('changegroupsubset'):
1452 if heads is None and remote.capable('changegroupsubset'):
1473 heads = rheads
1453 heads = rheads
1474
1454
1475 if heads is None:
1455 if heads is None:
1476 cg = remote.changegroup(fetch, 'pull')
1456 cg = remote.changegroup(fetch, 'pull')
1477 else:
1457 else:
1478 if not remote.capable('changegroupsubset'):
1458 if not remote.capable('changegroupsubset'):
1479 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1459 raise util.Abort(_("Partial pull cannot be done because other repository doesn't support changegroupsubset."))
1480 cg = remote.changegroupsubset(fetch, heads, 'pull')
1460 cg = remote.changegroupsubset(fetch, heads, 'pull')
1481 return self.addchangegroup(cg, 'pull', remote.url())
1461 return self.addchangegroup(cg, 'pull', remote.url())
1482 finally:
1462 finally:
1483 del lock
1463 del lock
1484
1464
1485 def push(self, remote, force=False, revs=None):
1465 def push(self, remote, force=False, revs=None):
1486 # there are two ways to push to remote repo:
1466 # there are two ways to push to remote repo:
1487 #
1467 #
1488 # addchangegroup assumes local user can lock remote
1468 # addchangegroup assumes local user can lock remote
1489 # repo (local filesystem, old ssh servers).
1469 # repo (local filesystem, old ssh servers).
1490 #
1470 #
1491 # unbundle assumes local user cannot lock remote repo (new ssh
1471 # unbundle assumes local user cannot lock remote repo (new ssh
1492 # servers, http servers).
1472 # servers, http servers).
1493
1473
1494 if remote.capable('unbundle'):
1474 if remote.capable('unbundle'):
1495 return self.push_unbundle(remote, force, revs)
1475 return self.push_unbundle(remote, force, revs)
1496 return self.push_addchangegroup(remote, force, revs)
1476 return self.push_addchangegroup(remote, force, revs)
1497
1477
1498 def prepush(self, remote, force, revs):
1478 def prepush(self, remote, force, revs):
1499 common = {}
1479 common = {}
1500 remote_heads = remote.heads()
1480 remote_heads = remote.heads()
1501 inc = self.findincoming(remote, common, remote_heads, force=force)
1481 inc = self.findincoming(remote, common, remote_heads, force=force)
1502
1482
1503 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1483 update, updated_heads = self.findoutgoing(remote, common, remote_heads)
1504 if revs is not None:
1484 if revs is not None:
1505 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1485 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
1506 else:
1486 else:
1507 bases, heads = update, self.changelog.heads()
1487 bases, heads = update, self.changelog.heads()
1508
1488
1509 if not bases:
1489 if not bases:
1510 self.ui.status(_("no changes found\n"))
1490 self.ui.status(_("no changes found\n"))
1511 return None, 1
1491 return None, 1
1512 elif not force:
1492 elif not force:
1513 # check if we're creating new remote heads
1493 # check if we're creating new remote heads
1514 # to be a remote head after push, node must be either
1494 # to be a remote head after push, node must be either
1515 # - unknown locally
1495 # - unknown locally
1516 # - a local outgoing head descended from update
1496 # - a local outgoing head descended from update
1517 # - a remote head that's known locally and not
1497 # - a remote head that's known locally and not
1518 # ancestral to an outgoing head
1498 # ancestral to an outgoing head
1519
1499
1520 warn = 0
1500 warn = 0
1521
1501
1522 if remote_heads == [nullid]:
1502 if remote_heads == [nullid]:
1523 warn = 0
1503 warn = 0
1524 elif not revs and len(heads) > len(remote_heads):
1504 elif not revs and len(heads) > len(remote_heads):
1525 warn = 1
1505 warn = 1
1526 else:
1506 else:
1527 newheads = list(heads)
1507 newheads = list(heads)
1528 for r in remote_heads:
1508 for r in remote_heads:
1529 if r in self.changelog.nodemap:
1509 if r in self.changelog.nodemap:
1530 desc = self.changelog.heads(r, heads)
1510 desc = self.changelog.heads(r, heads)
1531 l = [h for h in heads if h in desc]
1511 l = [h for h in heads if h in desc]
1532 if not l:
1512 if not l:
1533 newheads.append(r)
1513 newheads.append(r)
1534 else:
1514 else:
1535 newheads.append(r)
1515 newheads.append(r)
1536 if len(newheads) > len(remote_heads):
1516 if len(newheads) > len(remote_heads):
1537 warn = 1
1517 warn = 1
1538
1518
1539 if warn:
1519 if warn:
1540 self.ui.warn(_("abort: push creates new remote heads!\n"))
1520 self.ui.warn(_("abort: push creates new remote heads!\n"))
1541 self.ui.status(_("(did you forget to merge?"
1521 self.ui.status(_("(did you forget to merge?"
1542 " use push -f to force)\n"))
1522 " use push -f to force)\n"))
1543 return None, 0
1523 return None, 0
1544 elif inc:
1524 elif inc:
1545 self.ui.warn(_("note: unsynced remote changes!\n"))
1525 self.ui.warn(_("note: unsynced remote changes!\n"))
1546
1526
1547
1527
1548 if revs is None:
1528 if revs is None:
1549 # use the fast path, no race possible on push
1529 # use the fast path, no race possible on push
1550 cg = self._changegroup(common.keys(), 'push')
1530 cg = self._changegroup(common.keys(), 'push')
1551 else:
1531 else:
1552 cg = self.changegroupsubset(update, revs, 'push')
1532 cg = self.changegroupsubset(update, revs, 'push')
1553 return cg, remote_heads
1533 return cg, remote_heads
1554
1534
1555 def push_addchangegroup(self, remote, force, revs):
1535 def push_addchangegroup(self, remote, force, revs):
1556 lock = remote.lock()
1536 lock = remote.lock()
1557 try:
1537 try:
1558 ret = self.prepush(remote, force, revs)
1538 ret = self.prepush(remote, force, revs)
1559 if ret[0] is not None:
1539 if ret[0] is not None:
1560 cg, remote_heads = ret
1540 cg, remote_heads = ret
1561 return remote.addchangegroup(cg, 'push', self.url())
1541 return remote.addchangegroup(cg, 'push', self.url())
1562 return ret[1]
1542 return ret[1]
1563 finally:
1543 finally:
1564 del lock
1544 del lock
1565
1545
1566 def push_unbundle(self, remote, force, revs):
1546 def push_unbundle(self, remote, force, revs):
1567 # local repo finds heads on server, finds out what revs it
1547 # local repo finds heads on server, finds out what revs it
1568 # must push. once revs transferred, if server finds it has
1548 # must push. once revs transferred, if server finds it has
1569 # different heads (someone else won commit/push race), server
1549 # different heads (someone else won commit/push race), server
1570 # aborts.
1550 # aborts.
1571
1551
1572 ret = self.prepush(remote, force, revs)
1552 ret = self.prepush(remote, force, revs)
1573 if ret[0] is not None:
1553 if ret[0] is not None:
1574 cg, remote_heads = ret
1554 cg, remote_heads = ret
1575 if force: remote_heads = ['force']
1555 if force: remote_heads = ['force']
1576 return remote.unbundle(cg, remote_heads, 'push')
1556 return remote.unbundle(cg, remote_heads, 'push')
1577 return ret[1]
1557 return ret[1]
1578
1558
1579 def changegroupinfo(self, nodes, source):
1559 def changegroupinfo(self, nodes, source):
1580 if self.ui.verbose or source == 'bundle':
1560 if self.ui.verbose or source == 'bundle':
1581 self.ui.status(_("%d changesets found\n") % len(nodes))
1561 self.ui.status(_("%d changesets found\n") % len(nodes))
1582 if self.ui.debugflag:
1562 if self.ui.debugflag:
1583 self.ui.debug(_("list of changesets:\n"))
1563 self.ui.debug(_("list of changesets:\n"))
1584 for node in nodes:
1564 for node in nodes:
1585 self.ui.debug("%s\n" % hex(node))
1565 self.ui.debug("%s\n" % hex(node))
1586
1566
1587 def changegroupsubset(self, bases, heads, source, extranodes=None):
1567 def changegroupsubset(self, bases, heads, source, extranodes=None):
1588 """This function generates a changegroup consisting of all the nodes
1568 """This function generates a changegroup consisting of all the nodes
1589 that are descendents of any of the bases, and ancestors of any of
1569 that are descendents of any of the bases, and ancestors of any of
1590 the heads.
1570 the heads.
1591
1571
1592 It is fairly complex as determining which filenodes and which
1572 It is fairly complex as determining which filenodes and which
1593 manifest nodes need to be included for the changeset to be complete
1573 manifest nodes need to be included for the changeset to be complete
1594 is non-trivial.
1574 is non-trivial.
1595
1575
1596 Another wrinkle is doing the reverse, figuring out which changeset in
1576 Another wrinkle is doing the reverse, figuring out which changeset in
1597 the changegroup a particular filenode or manifestnode belongs to.
1577 the changegroup a particular filenode or manifestnode belongs to.
1598
1578
1599 The caller can specify some nodes that must be included in the
1579 The caller can specify some nodes that must be included in the
1600 changegroup using the extranodes argument. It should be a dict
1580 changegroup using the extranodes argument. It should be a dict
1601 where the keys are the filenames (or 1 for the manifest), and the
1581 where the keys are the filenames (or 1 for the manifest), and the
1602 values are lists of (node, linknode) tuples, where node is a wanted
1582 values are lists of (node, linknode) tuples, where node is a wanted
1603 node and linknode is the changelog node that should be transmitted as
1583 node and linknode is the changelog node that should be transmitted as
1604 the linkrev.
1584 the linkrev.
1605 """
1585 """
1606
1586
1607 if extranodes is None:
1587 if extranodes is None:
1608 # can we go through the fast path ?
1588 # can we go through the fast path ?
1609 heads.sort()
1589 heads.sort()
1610 allheads = self.heads()
1590 allheads = self.heads()
1611 allheads.sort()
1591 allheads.sort()
1612 if heads == allheads:
1592 if heads == allheads:
1613 common = []
1593 common = []
1614 # parents of bases are known from both sides
1594 # parents of bases are known from both sides
1615 for n in bases:
1595 for n in bases:
1616 for p in self.changelog.parents(n):
1596 for p in self.changelog.parents(n):
1617 if p != nullid:
1597 if p != nullid:
1618 common.append(p)
1598 common.append(p)
1619 return self._changegroup(common, source)
1599 return self._changegroup(common, source)
1620
1600
1621 self.hook('preoutgoing', throw=True, source=source)
1601 self.hook('preoutgoing', throw=True, source=source)
1622
1602
1623 # Set up some initial variables
1603 # Set up some initial variables
1624 # Make it easy to refer to self.changelog
1604 # Make it easy to refer to self.changelog
1625 cl = self.changelog
1605 cl = self.changelog
1626 # msng is short for missing - compute the list of changesets in this
1606 # msng is short for missing - compute the list of changesets in this
1627 # changegroup.
1607 # changegroup.
1628 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1608 msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
1629 self.changegroupinfo(msng_cl_lst, source)
1609 self.changegroupinfo(msng_cl_lst, source)
1630 # Some bases may turn out to be superfluous, and some heads may be
1610 # Some bases may turn out to be superfluous, and some heads may be
1631 # too. nodesbetween will return the minimal set of bases and heads
1611 # too. nodesbetween will return the minimal set of bases and heads
1632 # necessary to re-create the changegroup.
1612 # necessary to re-create the changegroup.
1633
1613
1634 # Known heads are the list of heads that it is assumed the recipient
1614 # Known heads are the list of heads that it is assumed the recipient
1635 # of this changegroup will know about.
1615 # of this changegroup will know about.
1636 knownheads = {}
1616 knownheads = {}
1637 # We assume that all parents of bases are known heads.
1617 # We assume that all parents of bases are known heads.
1638 for n in bases:
1618 for n in bases:
1639 for p in cl.parents(n):
1619 for p in cl.parents(n):
1640 if p != nullid:
1620 if p != nullid:
1641 knownheads[p] = 1
1621 knownheads[p] = 1
1642 knownheads = knownheads.keys()
1622 knownheads = knownheads.keys()
1643 if knownheads:
1623 if knownheads:
1644 # Now that we know what heads are known, we can compute which
1624 # Now that we know what heads are known, we can compute which
1645 # changesets are known. The recipient must know about all
1625 # changesets are known. The recipient must know about all
1646 # changesets required to reach the known heads from the null
1626 # changesets required to reach the known heads from the null
1647 # changeset.
1627 # changeset.
1648 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1628 has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
1649 junk = None
1629 junk = None
1650 # Transform the list into an ersatz set.
1630 # Transform the list into an ersatz set.
1651 has_cl_set = dict.fromkeys(has_cl_set)
1631 has_cl_set = dict.fromkeys(has_cl_set)
1652 else:
1632 else:
1653 # If there were no known heads, the recipient cannot be assumed to
1633 # If there were no known heads, the recipient cannot be assumed to
1654 # know about any changesets.
1634 # know about any changesets.
1655 has_cl_set = {}
1635 has_cl_set = {}
1656
1636
1657 # Make it easy to refer to self.manifest
1637 # Make it easy to refer to self.manifest
1658 mnfst = self.manifest
1638 mnfst = self.manifest
1659 # We don't know which manifests are missing yet
1639 # We don't know which manifests are missing yet
1660 msng_mnfst_set = {}
1640 msng_mnfst_set = {}
1661 # Nor do we know which filenodes are missing.
1641 # Nor do we know which filenodes are missing.
1662 msng_filenode_set = {}
1642 msng_filenode_set = {}
1663
1643
1664 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1644 junk = mnfst.index[len(mnfst) - 1] # Get around a bug in lazyindex
1665 junk = None
1645 junk = None
1666
1646
# A changeset always belongs to itself, so the changenode lookup
# function for a changenode is identity.
def identity(x):
    """Return *x* unchanged (a changeset node is its own linkrev node)."""
    return x
1671
1651
# A function generating function. Sets up an environment for the
# inner function.
def cmp_by_rev_func(revlog):
    """Return a comparator ordering nodes by revision number in *revlog*.

    Revision order is both the most efficient order to read the nodes
    in and a topological sorting of the nodes, so this comparator is
    used for every node list emitted into the changegroup.
    """
    def cmp_by_rev(a, b):
        # Same result as cmp(revlog.rev(a), revlog.rev(b)) but without
        # relying on the Python 2-only cmp() builtin.
        ra, rb = revlog.rev(a), revlog.rev(b)
        return (ra > rb) - (ra < rb)
    return cmp_by_rev
1682
1662
# If we determine that a particular file or manifest node must be a
# node that the recipient of the changegroup will already have, we can
# also assume the recipient will have all the parents. This function
# prunes them from the set of missing nodes.
def prune_parents(revlog, hasset, msngset):
    """Remove from *msngset* every ancestor of the nodes in *hasset*.

    If the recipient is known to have a node it must have all of its
    ancestors too, so none of them need to be transmitted.  *hasset*
    is extended in place with the discovered ancestors and each of
    them is popped from *msngset*.
    """
    haslst = hasset.keys()
    # Walk in revision (topological) order so ancestors are expanded
    # efficiently.
    haslst.sort(cmp_by_rev_func(revlog))
    for node in haslst:
        parentlst = [p for p in revlog.parents(node) if p != nullid]
        while parentlst:
            n = parentlst.pop()
            if n not in hasset:
                hasset[n] = 1
                p = [p for p in revlog.parents(n) if p != nullid]
                parentlst.extend(p)
    for n in hasset:
        msngset.pop(n, None)
1700
1680
# This is a function generating function used to set up an environment
# for the inner function to execute in.
def manifest_and_file_collector(changedfileset):
    """Return a per-changeset callback collecting manifests and files.

    The returned function is invoked for every changeset node emitted
    into the changegroup.  It records every file touched by any
    outgoing changeset (into *changedfileset*) and every potentially
    required manifest node (into the enclosing msng_mnfst_set),
    remembering the first changeset seen referencing each manifest so
    we can later decide which changeset 'owns' it.
    """
    def collect_manifests_and_files(clnode):
        c = cl.read(clnode)
        for f in c[3]:
            # Keep only one instance of each filename string for each
            # filename (saves memory on repeated names).
            changedfileset.setdefault(f, f)
        msng_mnfst_set.setdefault(c[0], clnode)
    return collect_manifests_and_files
1722
1702
# Figure out which manifest nodes (of the ones we think might be part
# of the changegroup) the recipient must know about and remove them
# from the changegroup.
def prune_manifests():
    """Drop from msng_mnfst_set the manifests the recipient must have.

    A 'missing' manifest whose linkrev changeset is in has_cl_set is
    necessarily known to the recipient; prune_parents() then also
    removes all of its ancestors from the missing set.
    """
    has_mnfst_set = {}
    for n in msng_mnfst_set:
        # If a 'missing' manifest thinks it belongs to a changenode
        # the recipient is assumed to have, obviously the recipient
        # must have that manifest.
        linknode = cl.node(mnfst.linkrev(mnfst.rev(n)))
        if linknode in has_cl_set:
            has_mnfst_set[n] = 1
    prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)
1736
1716
# Use the information collected in collect_manifests_and_files to say
# which changenode any manifestnode belongs to.
def lookup_manifest_link(mnfstnode):
    """Return the changeset node that 'owns' *mnfstnode*."""
    return msng_mnfst_set[mnfstnode]
1741
1721
# A function generating function that sets up the initial environment
# for the inner function.
def filenode_collector(changedfiles):
    """Return a per-manifest callback collecting missing filenodes.

    For every manifest node emitted into the changegroup, the returned
    function records (into the enclosing msng_filenode_set) each
    filenode referenced for a file in *changedfiles*, together with
    the changeset node the filenode is assumed to belong to: that of
    the first manifest seen referencing it.
    """
    # Mutable one-element list so the closure can update the value.
    next_rev = [0]
    def collect_msng_filenodes(mnfstnode):
        r = mnfst.rev(mnfstnode)
        if r == next_rev[0]:
            # If the last rev we looked at was the one just previous,
            # we only need to see a diff.
            deltamf = mnfst.readdelta(mnfstnode)
            # For each line in the delta
            for f, fnode in deltamf.iteritems():
                f = changedfiles.get(f, None)
                # And if the file is in the list of files we care
                # about.
                if f is not None:
                    # Get the changenode this manifest belongs to
                    clnode = msng_mnfst_set[mnfstnode]
                    # Create the set of filenodes for the file if
                    # there isn't one already.
                    ndset = msng_filenode_set.setdefault(f, {})
                    # And set the filenode's changelog node to the
                    # manifest's if it hasn't been set already.
                    ndset.setdefault(fnode, clnode)
        else:
            # Otherwise we need a full manifest.
            m = mnfst.read(mnfstnode)
            # For every file we care about.
            for f in changedfiles:
                fnode = m.get(f, None)
                # If it's in the manifest
                if fnode is not None:
                    # See comments above.
                    clnode = msng_mnfst_set[mnfstnode]
                    ndset = msng_filenode_set.setdefault(f, {})
                    ndset.setdefault(fnode, clnode)
        # Remember the revision we hope to see next.
        next_rev[0] = r + 1
    return collect_msng_filenodes
1788
1768
# We have a list of filenodes we think we need for a file, lets remove
# all those we know the recipient must have.
def prune_filenodes(f, filerevlog):
    """Drop from msng_filenode_set[f] the filenodes the recipient has.

    A filenode whose linkrev changeset is in has_cl_set is known to
    the recipient; prune_parents() then also removes its ancestors.
    """
    msngset = msng_filenode_set[f]
    hasset = {}
    # If a 'missing' filenode thinks it belongs to a changenode we
    # assume the recipient must have, then the recipient must have
    # that filenode.
    for n in msngset:
        clnode = cl.node(filerevlog.linkrev(filerevlog.rev(n)))
        if clnode in has_cl_set:
            hasset[n] = 1
    prune_parents(filerevlog, hasset, msngset)
1802
1782
# A function generator function that sets up a context for the
# inner function.
def lookup_filenode_link_func(fname):
    """Return a lookup function mapping a filenode of *fname* to the
    changeset node it belongs to (as recorded by filenode_collector)."""
    msngset = msng_filenode_set[fname]
    # Lookup the changenode the filenode belongs to.
    def lookup_filenode_link(fnode):
        return msngset[fnode]
    return lookup_filenode_link
1811
1791
# Add the nodes that were explicitly requested.
def add_extra_nodes(name, nodes):
    """Merge the caller-requested extra (node, linknode) pairs for
    *name* (a filename, or 1 for the manifest) into the *nodes*
    mapping, without overriding nodes already present."""
    if not extranodes or name not in extranodes:
        return

    for node, linknode in extranodes[name]:
        if node not in nodes:
            nodes[node] = linknode
1820
1800
# Now that we have all these utility functions to help out and
# logically divide up the task, generate the group.
def gengroup():
    """Yield the changegroup chunks: the changelog group, then the
    manifest group, then one group per changed file, terminated by a
    close chunk."""
    # The set of changed files starts empty.
    changedfiles = {}
    # Create a changenode group generator that will call our functions
    # back to lookup the owning changenode and collect information.
    group = cl.group(msng_cl_lst, identity,
                     manifest_and_file_collector(changedfiles))
    for chnk in group:
        yield chnk

    # The list of manifests has been collected by the generator
    # calling our functions back.
    prune_manifests()
    add_extra_nodes(1, msng_mnfst_set)
    msng_mnfst_lst = msng_mnfst_set.keys()
    # Sort the manifestnodes by revision number.
    msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
    # Create a generator for the manifestnodes that calls our lookup
    # and data collection functions back.
    group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                        filenode_collector(changedfiles))
    for chnk in group:
        yield chnk

    # These are no longer needed, dereference and toss the memory for
    # them.
    msng_mnfst_lst = None
    msng_mnfst_set.clear()

    if extranodes:
        for fname in extranodes:
            # Key 1 means the manifest, handled above; skip it here.
            if isinstance(fname, int):
                continue
            msng_filenode_set.setdefault(fname, {})
            changedfiles[fname] = 1
    # Go through all our files in order sorted by name.
    for fname in util.sort(changedfiles):
        filerevlog = self.file(fname)
        if not len(filerevlog):
            raise util.Abort(_("empty or missing revlog for %s") % fname)
        # Toss out the filenodes that the recipient isn't really
        # missing.
        if fname in msng_filenode_set:
            prune_filenodes(fname, filerevlog)
            add_extra_nodes(fname, msng_filenode_set[fname])
            msng_filenode_lst = msng_filenode_set[fname].keys()
        else:
            msng_filenode_lst = []
        # If any filenodes are left, generate the group for them,
        # otherwise don't bother.
        if len(msng_filenode_lst) > 0:
            yield changegroup.chunkheader(len(fname))
            yield fname
            # Sort the filenodes by their revision #
            msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
            # Create a group generator and only pass in a changenode
            # lookup function as we need to collect no information
            # from filenodes.
            group = filerevlog.group(msng_filenode_lst,
                                     lookup_filenode_link_func(fname))
            for chnk in group:
                yield chnk
        if fname in msng_filenode_set:
            # Don't need this anymore, toss it to free memory.
            del msng_filenode_set[fname]
    # Signal that no more groups are left.
    yield changegroup.closechunk()
1890
1870
1891 if msng_cl_lst:
1871 if msng_cl_lst:
1892 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1872 self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)
1893
1873
1894 return util.chunkbuffer(gengroup())
1874 return util.chunkbuffer(gengroup())
1895
1875
def changegroup(self, basenodes, source):
    """Generate a changegroup of all nodes between *basenodes* and the
    repository's current heads.

    Implemented via changegroupsubset() against self.heads() so that a
    commit landing between head computation and group generation cannot
    produce an inconsistent bundle (issue1320).
    """
    # to avoid a race we use changegroupsubset() (issue1320)
    return self.changegroupsubset(basenodes, self.heads(), source)
1899
1879
def _changegroup(self, common, source):
    """Generate a changegroup of all nodes that we have that a recipient
    doesn't.

    This is much easier than changegroupsubset() as we can assume that
    the recipient has any changenode we aren't sending them.

    *common* is the set of common nodes between remote and self.
    Returns a util.chunkbuffer wrapping the chunk generator.
    """
    self.hook('preoutgoing', throw=True, source=source)

    cl = self.changelog
    nodes = cl.findmissing(common)
    # Revisions of every outgoing changeset, for fast linkrev tests.
    revset = dict.fromkeys([cl.rev(n) for n in nodes])
    self.changegroupinfo(nodes, source)

    def identity(x):
        # A changeset node is its own linkrev lookup.
        return x

    def gennodelst(log):
        # Yield the nodes of *log* whose linkrev is an outgoing changeset.
        for r in log:
            if log.linkrev(r) in revset:
                yield log.node(r)

    def changed_file_collector(changedfileset):
        # Record, into *changedfileset*, every file touched by each
        # outgoing changeset as the changelog group is generated.
        def collect_changed_files(clnode):
            c = cl.read(clnode)
            for fname in c[3]:
                changedfileset[fname] = 1
        return collect_changed_files

    def lookuprevlink_func(revlog):
        # Map a node of *revlog* to the changeset node of its linkrev.
        def lookuprevlink(n):
            return cl.node(revlog.linkrev(revlog.rev(n)))
        return lookuprevlink

    def gengroup():
        # construct a list of all changed files
        changedfiles = {}

        for chnk in cl.group(nodes, identity,
                             changed_file_collector(changedfiles)):
            yield chnk

        mnfst = self.manifest
        nodeiter = gennodelst(mnfst)
        for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
            yield chnk

        for fname in util.sort(changedfiles):
            filerevlog = self.file(fname)
            if not len(filerevlog):
                raise util.Abort(_("empty or missing revlog for %s") % fname)
            # Materialize so the emptiness test doesn't consume the
            # iterator.
            nodeiter = list(gennodelst(filerevlog))
            if nodeiter:
                yield changegroup.chunkheader(len(fname))
                yield fname
                lookup = lookuprevlink_func(filerevlog)
                for chnk in filerevlog.group(nodeiter, lookup):
                    yield chnk

        yield changegroup.closechunk()

    if nodes:
        self.hook('outgoing', node=hex(nodes[0]), source=source)

    return util.chunkbuffer(gengroup())
1968
1948
def addchangegroup(self, source, srctype, url, emptyok=False):
    """Add a changegroup read from *source* to the repo.

    *srctype* and *url* describe where the group came from and are
    passed through to the hooks.  With *emptyok*, an empty changelog
    group is not an error.

    Return values:
    - nothing changed or no source: 0
    - more heads than before: 1+added heads (2..n)
    - fewer heads than before: -1-removed heads (-2..-n)
    - number of heads stays the same: 1
    """
    def csmap(x):
        self.ui.debug(_("add changeset %s\n") % short(x))
        return len(cl)

    def revmap(x):
        return cl.rev(x)

    if not source:
        return 0

    self.hook('prechangegroup', throw=True, source=srctype, url=url)

    changesets = files = revisions = 0

    # write changelog data to temp files so concurrent readers will not
    # see an inconsistent view
    cl = self.changelog
    cl.delayupdate()
    oldheads = len(cl.heads())

    tr = self.transaction()
    try:
        trp = weakref.proxy(tr)
        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        cor = len(cl) - 1
        chunkiter = changegroup.chunkiter(source)
        if cl.addgroup(chunkiter, csmap, trp) is None and not emptyok:
            raise util.Abort(_("received changelog group is empty"))
        cnr = len(cl) - 1
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        chunkiter = changegroup.chunkiter(source)
        # no need to check for empty manifest group here:
        # if the result of the merge of 1 and 2 is the same in 3 and 4,
        # no new manifest will be created and the manifest group will
        # be empty during the pull
        self.manifest.addgroup(chunkiter, revmap, trp)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            f = changegroup.getchunk(source)
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = len(fl)
            chunkiter = changegroup.chunkiter(source)
            if fl.addgroup(chunkiter, revmap, trp) is None:
                raise util.Abort(_("received file revlog group is empty"))
            revisions += len(fl) - o
            files += 1

        # make changelog see real files again
        cl.finalize(trp)

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads != oldheads:
            heads = _(" (%+d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        if changesets > 0:
            self.hook('pretxnchangegroup', throw=True,
                      node=hex(self.changelog.node(cor+1)), source=srctype,
                      url=url)

        tr.close()
    finally:
        # dropping the reference releases/aborts the transaction
        del tr

    if changesets > 0:
        # forcefully update the on-disk branch cache
        self.ui.debug(_("updating the branch cache\n"))
        self.branchtags()
        self.hook("changegroup", node=hex(self.changelog.node(cor+1)),
                  source=srctype, url=url)

        for i in xrange(cor + 1, cnr + 1):
            self.hook("incoming", node=hex(self.changelog.node(i)),
                      source=srctype, url=url)

    # never return 0 here:
    if newheads < oldheads:
        return newheads - oldheads - 1
    else:
        return newheads - oldheads + 1
2071
2051
2072
2052
2073 def stream_in(self, remote):
2053 def stream_in(self, remote):
2074 fp = remote.stream_out()
2054 fp = remote.stream_out()
2075 l = fp.readline()
2055 l = fp.readline()
2076 try:
2056 try:
2077 resp = int(l)
2057 resp = int(l)
2078 except ValueError:
2058 except ValueError:
2079 raise error.ResponseError(
2059 raise error.ResponseError(
2080 _('Unexpected response from remote server:'), l)
2060 _('Unexpected response from remote server:'), l)
2081 if resp == 1:
2061 if resp == 1:
2082 raise util.Abort(_('operation forbidden by server'))
2062 raise util.Abort(_('operation forbidden by server'))
2083 elif resp == 2:
2063 elif resp == 2:
2084 raise util.Abort(_('locking the remote repository failed'))
2064 raise util.Abort(_('locking the remote repository failed'))
2085 elif resp != 0:
2065 elif resp != 0:
2086 raise util.Abort(_('the server sent an unknown error code'))
2066 raise util.Abort(_('the server sent an unknown error code'))
2087 self.ui.status(_('streaming all changes\n'))
2067 self.ui.status(_('streaming all changes\n'))
2088 l = fp.readline()
2068 l = fp.readline()
2089 try:
2069 try:
2090 total_files, total_bytes = map(int, l.split(' ', 1))
2070 total_files, total_bytes = map(int, l.split(' ', 1))
2091 except (ValueError, TypeError):
2071 except (ValueError, TypeError):
2092 raise error.ResponseError(
2072 raise error.ResponseError(
2093 _('Unexpected response from remote server:'), l)
2073 _('Unexpected response from remote server:'), l)
2094 self.ui.status(_('%d files to transfer, %s of data\n') %
2074 self.ui.status(_('%d files to transfer, %s of data\n') %
2095 (total_files, util.bytecount(total_bytes)))
2075 (total_files, util.bytecount(total_bytes)))
2096 start = time.time()
2076 start = time.time()
2097 for i in xrange(total_files):
2077 for i in xrange(total_files):
2098 # XXX doesn't support '\n' or '\r' in filenames
2078 # XXX doesn't support '\n' or '\r' in filenames
2099 l = fp.readline()
2079 l = fp.readline()
2100 try:
2080 try:
2101 name, size = l.split('\0', 1)
2081 name, size = l.split('\0', 1)
2102 size = int(size)
2082 size = int(size)
2103 except (ValueError, TypeError):
2083 except (ValueError, TypeError):
2104 raise error.ResponseError(
2084 raise error.ResponseError(
2105 _('Unexpected response from remote server:'), l)
2085 _('Unexpected response from remote server:'), l)
2106 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2086 self.ui.debug(_('adding %s (%s)\n') % (name, util.bytecount(size)))
2107 ofp = self.sopener(name, 'w')
2087 ofp = self.sopener(name, 'w')
2108 for chunk in util.filechunkiter(fp, limit=size):
2088 for chunk in util.filechunkiter(fp, limit=size):
2109 ofp.write(chunk)
2089 ofp.write(chunk)
2110 ofp.close()
2090 ofp.close()
2111 elapsed = time.time() - start
2091 elapsed = time.time() - start
2112 if elapsed <= 0:
2092 if elapsed <= 0:
2113 elapsed = 0.001
2093 elapsed = 0.001
2114 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2094 self.ui.status(_('transferred %s in %.1f seconds (%s/sec)\n') %
2115 (util.bytecount(total_bytes), elapsed,
2095 (util.bytecount(total_bytes), elapsed,
2116 util.bytecount(total_bytes / elapsed)))
2096 util.bytecount(total_bytes / elapsed)))
2117 self.invalidate()
2097 self.invalidate()
2118 return len(self.heads()) + 1
2098 return len(self.heads()) + 1
2119
2099
2120 def clone(self, remote, heads=[], stream=False):
2100 def clone(self, remote, heads=[], stream=False):
2121 '''clone remote repository.
2101 '''clone remote repository.
2122
2102
2123 keyword arguments:
2103 keyword arguments:
2124 heads: list of revs to clone (forces use of pull)
2104 heads: list of revs to clone (forces use of pull)
2125 stream: use streaming clone if possible'''
2105 stream: use streaming clone if possible'''
2126
2106
2127 # now, all clients that can request uncompressed clones can
2107 # now, all clients that can request uncompressed clones can
2128 # read repo formats supported by all servers that can serve
2108 # read repo formats supported by all servers that can serve
2129 # them.
2109 # them.
2130
2110
2131 # if revlog format changes, client will have to check version
2111 # if revlog format changes, client will have to check version
2132 # and format flags on "stream" capability, and use
2112 # and format flags on "stream" capability, and use
2133 # uncompressed only if compatible.
2113 # uncompressed only if compatible.
2134
2114
2135 if stream and not heads and remote.capable('stream'):
2115 if stream and not heads and remote.capable('stream'):
2136 return self.stream_in(remote)
2116 return self.stream_in(remote)
2137 return self.pull(remote, heads)
2117 return self.pull(remote, heads)
2138
2118
2139 # used to avoid circular references so destructors work
2119 # used to avoid circular references so destructors work
2140 def aftertrans(files):
2120 def aftertrans(files):
2141 renamefiles = [tuple(t) for t in files]
2121 renamefiles = [tuple(t) for t in files]
2142 def a():
2122 def a():
2143 for src, dest in renamefiles:
2123 for src, dest in renamefiles:
2144 util.rename(src, dest)
2124 util.rename(src, dest)
2145 return a
2125 return a
2146
2126
2147 def instance(ui, path, create):
2127 def instance(ui, path, create):
2148 return localrepository(ui, util.drop_scheme('file', path), create)
2128 return localrepository(ui, util.drop_scheme('file', path), create)
2149
2129
2150 def islocal(path):
2130 def islocal(path):
2151 return True
2131 return True
@@ -1,56 +1,56
1 % before commit
1 % before commit
2 % store can be written by the group, other files cannot
2 % store can be written by the group, other files cannot
3 % store is setgid
3 % store is setgid
4 00700 ./.hg/
4 00700 ./.hg/
5 00600 ./.hg/00changelog.i
5 00600 ./.hg/00changelog.i
6 00600 ./.hg/requires
6 00600 ./.hg/requires
7 00770 ./.hg/store/
7 00770 ./.hg/store/
8
8
9 % after commit
9 % after commit
10 % working dir files can only be written by the owner
10 % working dir files can only be written by the owner
11 % files created in .hg can be written by the group
11 % files created in .hg can be written by the group
12 % (in particular, store/**, dirstate, branch cache file, undo files)
12 % (in particular, store/**, dirstate, branch cache file, undo files)
13 % new directories are setgid
13 % new directories are setgid
14 00700 ./.hg/
14 00700 ./.hg/
15 00600 ./.hg/00changelog.i
15 00600 ./.hg/00changelog.i
16 00660 ./.hg/dirstate
16 00660 ./.hg/dirstate
17 00600 ./.hg/requires
17 00600 ./.hg/requires
18 00770 ./.hg/store/
18 00770 ./.hg/store/
19 00660 ./.hg/store/00changelog.i
19 00660 ./.hg/store/00changelog.i
20 00660 ./.hg/store/00manifest.i
20 00660 ./.hg/store/00manifest.i
21 00770 ./.hg/store/data/
21 00770 ./.hg/store/data/
22 00770 ./.hg/store/data/dir/
22 00770 ./.hg/store/data/dir/
23 00660 ./.hg/store/data/dir/bar.i
23 00660 ./.hg/store/data/dir/bar.i
24 00660 ./.hg/store/data/foo.i
24 00660 ./.hg/store/data/foo.i
25 00660 ./.hg/store/fncache
25 00660 ./.hg/store/fncache
26 00660 ./.hg/store/undo
26 00660 ./.hg/store/undo
27 00660 ./.hg/undo.branch
27 00660 ./.hg/undo.branch
28 00660 ./.hg/undo.dirstate
28 00660 ./.hg/undo.dirstate
29 00700 ./dir/
29 00700 ./dir/
30 00600 ./dir/bar
30 00600 ./dir/bar
31 00600 ./foo
31 00600 ./foo
32
32
33 % before push
33 % before push
34 % group can write everything
34 % group can write everything
35 00770 ../push/.hg/
35 00770 ../push/.hg/
36 00660 ../push/.hg/00changelog.i
36 00660 ../push/.hg/00changelog.i
37 00660 ../push/.hg/requires
37 00660 ../push/.hg/requires
38 00770 ../push/.hg/store/
38 00770 ../push/.hg/store/
39
39
40 % after push
40 % after push
41 % group can still write everything
41 % group can still write everything
42 00770 ../push/.hg/
42 00770 ../push/.hg/
43 00660 ../push/.hg/00changelog.i
43 00660 ../push/.hg/00changelog.i
44 00660 ../push/.hg/branch.cache
44 00660 ../push/.hg/branchheads.cache
45 00660 ../push/.hg/requires
45 00660 ../push/.hg/requires
46 00770 ../push/.hg/store/
46 00770 ../push/.hg/store/
47 00660 ../push/.hg/store/00changelog.i
47 00660 ../push/.hg/store/00changelog.i
48 00660 ../push/.hg/store/00manifest.i
48 00660 ../push/.hg/store/00manifest.i
49 00770 ../push/.hg/store/data/
49 00770 ../push/.hg/store/data/
50 00770 ../push/.hg/store/data/dir/
50 00770 ../push/.hg/store/data/dir/
51 00660 ../push/.hg/store/data/dir/bar.i
51 00660 ../push/.hg/store/data/dir/bar.i
52 00660 ../push/.hg/store/data/foo.i
52 00660 ../push/.hg/store/data/foo.i
53 00660 ../push/.hg/store/fncache
53 00660 ../push/.hg/store/fncache
54 00660 ../push/.hg/store/undo
54 00660 ../push/.hg/store/undo
55 00660 ../push/.hg/undo.branch
55 00660 ../push/.hg/undo.branch
56 00660 ../push/.hg/undo.dirstate
56 00660 ../push/.hg/undo.dirstate
@@ -1,78 +1,78
1 #!/bin/sh
1 #!/bin/sh
2
2
3 branches=.hg/branch.cache
3 branches=.hg/branchheads.cache
4 echo '[extensions]' >> $HGRCPATH
4 echo '[extensions]' >> $HGRCPATH
5 echo 'hgext.mq=' >> $HGRCPATH
5 echo 'hgext.mq=' >> $HGRCPATH
6
6
7 show_branch_cache()
7 show_branch_cache()
8 {
8 {
9 # force cache (re)generation
9 # force cache (re)generation
10 hg log -r does-not-exist 2> /dev/null
10 hg log -r does-not-exist 2> /dev/null
11 hg log -r tip --template 'tip: #rev#\n'
11 hg log -r tip --template 'tip: #rev#\n'
12 if [ -f $branches ]; then
12 if [ -f $branches ]; then
13 sort $branches
13 sort $branches
14 else
14 else
15 echo No branch cache
15 echo No branch cache
16 fi
16 fi
17 if [ "$1" = 1 ]; then
17 if [ "$1" = 1 ]; then
18 for b in foo bar; do
18 for b in foo bar; do
19 hg log -r $b --template "branch $b: "'#rev#\n'
19 hg log -r $b --template "branch $b: "'#rev#\n'
20 done
20 done
21 fi
21 fi
22 }
22 }
23
23
24 hg init a
24 hg init a
25 cd a
25 cd a
26 hg qinit -c
26 hg qinit -c
27
27
28 echo '# mq patch on an empty repo'
28 echo '# mq patch on an empty repo'
29 hg qnew p1
29 hg qnew p1
30 show_branch_cache
30 show_branch_cache
31
31
32 echo > pfile
32 echo > pfile
33 hg add pfile
33 hg add pfile
34 hg qrefresh -m 'patch 1'
34 hg qrefresh -m 'patch 1'
35 show_branch_cache
35 show_branch_cache
36
36
37 echo
37 echo
38 echo '# some regular revisions'
38 echo '# some regular revisions'
39 hg qpop
39 hg qpop
40 echo foo > foo
40 echo foo > foo
41 hg add foo
41 hg add foo
42 echo foo > .hg/branch
42 echo foo > .hg/branch
43 hg ci -m 'branch foo' -d '1000000 0'
43 hg ci -m 'branch foo' -d '1000000 0'
44
44
45 echo bar > bar
45 echo bar > bar
46 hg add bar
46 hg add bar
47 echo bar > .hg/branch
47 echo bar > .hg/branch
48 hg ci -m 'branch bar' -d '1000000 0'
48 hg ci -m 'branch bar' -d '1000000 0'
49 show_branch_cache
49 show_branch_cache
50
50
51 echo
51 echo
52 echo '# add some mq patches'
52 echo '# add some mq patches'
53 hg qpush
53 hg qpush
54 show_branch_cache
54 show_branch_cache
55
55
56 hg qnew p2
56 hg qnew p2
57 echo foo > .hg/branch
57 echo foo > .hg/branch
58 echo foo2 >> foo
58 echo foo2 >> foo
59 hg qrefresh -m 'patch 2'
59 hg qrefresh -m 'patch 2'
60 show_branch_cache 1
60 show_branch_cache 1
61
61
62 echo
62 echo
63 echo '# removing the cache'
63 echo '# removing the cache'
64 rm $branches
64 rm $branches
65 show_branch_cache 1
65 show_branch_cache 1
66
66
67 echo
67 echo
68 echo '# importing rev 1 (the cache now ends in one of the patches)'
68 echo '# importing rev 1 (the cache now ends in one of the patches)'
69 hg qimport -r 1 -n p0
69 hg qimport -r 1 -n p0
70 show_branch_cache 1
70 show_branch_cache 1
71 hg log -r qbase --template 'qbase: #rev#\n'
71 hg log -r qbase --template 'qbase: #rev#\n'
72
72
73 echo
73 echo
74 echo '# detect an invalid cache'
74 echo '# detect an invalid cache'
75 hg qpop -a
75 hg qpop -a
76 hg qpush -a
76 hg qpush -a
77 show_branch_cache
77 show_branch_cache
78
78
@@ -1,110 +1,110
1 #!/bin/sh
1 #!/bin/sh
2
2
3 branchcache=.hg/branch.cache
3 branchcache=.hg/branchheads.cache
4
4
5 hg init t
5 hg init t
6 cd t
6 cd t
7 hg branches
7 hg branches
8
8
9 echo foo > a
9 echo foo > a
10 hg add a
10 hg add a
11 hg ci -m "initial" -d "1000000 0"
11 hg ci -m "initial" -d "1000000 0"
12 hg branch foo
12 hg branch foo
13 hg branch
13 hg branch
14 hg ci -m "add branch name" -d "1000000 0"
14 hg ci -m "add branch name" -d "1000000 0"
15 hg branch bar
15 hg branch bar
16 hg ci -m "change branch name" -d "1000000 0"
16 hg ci -m "change branch name" -d "1000000 0"
17 echo % branch shadowing
17 echo % branch shadowing
18 hg branch default
18 hg branch default
19 hg branch -f default
19 hg branch -f default
20 hg ci -m "clear branch name" -d "1000000 0"
20 hg ci -m "clear branch name" -d "1000000 0"
21
21
22 hg co foo
22 hg co foo
23 hg branch
23 hg branch
24 echo bleah > a
24 echo bleah > a
25 hg ci -m "modify a branch" -d "1000000 0"
25 hg ci -m "modify a branch" -d "1000000 0"
26
26
27 hg merge default
27 hg merge default
28 hg branch
28 hg branch
29 hg ci -m "merge" -d "1000000 0"
29 hg ci -m "merge" -d "1000000 0"
30 hg log
30 hg log
31
31
32 hg branches
32 hg branches
33 hg branches -q
33 hg branches -q
34
34
35 echo % test for invalid branch cache
35 echo % test for invalid branch cache
36 hg rollback
36 hg rollback
37 cp $branchcache .hg/bc-invalid
37 cp $branchcache .hg/bc-invalid
38 hg log -r foo
38 hg log -r foo
39 cp .hg/bc-invalid $branchcache
39 cp .hg/bc-invalid $branchcache
40 hg --debug log -r foo
40 hg --debug log -r foo
41 rm $branchcache
41 rm $branchcache
42 echo corrupted > $branchcache
42 echo corrupted > $branchcache
43 hg log -qr foo
43 hg log -qr foo
44 cat $branchcache
44 cat $branchcache
45
45
46 echo % push should update the branch cache
46 echo % push should update the branch cache
47 hg init ../target
47 hg init ../target
48 echo % pushing just rev 0
48 echo % pushing just rev 0
49 hg push -qr 0 ../target
49 hg push -qr 0 ../target
50 cat ../target/$branchcache
50 cat ../target/$branchcache
51 echo % pushing everything
51 echo % pushing everything
52 hg push -qf ../target
52 hg push -qf ../target
53 cat ../target/$branchcache
53 cat ../target/$branchcache
54
54
55 echo % update with no arguments: tipmost revision of the current branch
55 echo % update with no arguments: tipmost revision of the current branch
56 hg up -q -C 0
56 hg up -q -C 0
57 hg up -q
57 hg up -q
58 hg id
58 hg id
59 hg up -q 1
59 hg up -q 1
60 hg up -q
60 hg up -q
61 hg id
61 hg id
62 hg branch foobar
62 hg branch foobar
63 hg up
63 hg up
64
64
65 echo % fastforward merge
65 echo % fastforward merge
66 hg branch ff
66 hg branch ff
67 echo ff > ff
67 echo ff > ff
68 hg ci -Am'fast forward' -d '1000000 0'
68 hg ci -Am'fast forward' -d '1000000 0'
69 hg up foo
69 hg up foo
70 hg merge ff
70 hg merge ff
71 hg branch
71 hg branch
72 hg commit -m'Merge ff into foo' -d '1000000 0'
72 hg commit -m'Merge ff into foo' -d '1000000 0'
73 hg parents
73 hg parents
74 hg manifest
74 hg manifest
75
75
76 echo % test merging, add 3 default heads and one test head
76 echo % test merging, add 3 default heads and one test head
77 cd ..
77 cd ..
78 hg init merges
78 hg init merges
79 cd merges
79 cd merges
80 echo a > a
80 echo a > a
81 hg ci -Ama
81 hg ci -Ama
82
82
83 echo b > b
83 echo b > b
84 hg ci -Amb
84 hg ci -Amb
85
85
86 hg up 0
86 hg up 0
87 echo c > c
87 echo c > c
88 hg ci -Amc
88 hg ci -Amc
89
89
90 hg up 0
90 hg up 0
91 echo d > d
91 echo d > d
92 hg ci -Amd
92 hg ci -Amd
93
93
94 hg up 0
94 hg up 0
95 hg branch test
95 hg branch test
96 echo e >> e
96 echo e >> e
97 hg ci -Ame
97 hg ci -Ame
98
98
99 hg log
99 hg log
100
100
101 echo % implicit merge with test branch as parent
101 echo % implicit merge with test branch as parent
102 hg merge
102 hg merge
103 hg up -C default
103 hg up -C default
104 echo % implicit merge with default branch as parent
104 echo % implicit merge with default branch as parent
105 hg merge
105 hg merge
106 echo % 3 branch heads, explicit merge required
106 echo % 3 branch heads, explicit merge required
107 hg merge 2
107 hg merge 2
108 hg ci -m merge
108 hg ci -m merge
109 echo % 2 branch heads, implicit merge works
109 echo % 2 branch heads, implicit merge works
110 hg merge
110 hg merge
@@ -1,172 +1,174
1 marked working directory as branch foo
1 marked working directory as branch foo
2 foo
2 foo
3 marked working directory as branch bar
3 marked working directory as branch bar
4 % branch shadowing
4 % branch shadowing
5 abort: a branch of the same name already exists (use --force to override)
5 abort: a branch of the same name already exists (use --force to override)
6 marked working directory as branch default
6 marked working directory as branch default
7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
7 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
8 foo
8 foo
9 created new head
9 created new head
10 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
10 0 files updated, 0 files merged, 0 files removed, 0 files unresolved
11 (branch merge, don't forget to commit)
11 (branch merge, don't forget to commit)
12 foo
12 foo
13 changeset: 5:5f8fb06e083e
13 changeset: 5:5f8fb06e083e
14 branch: foo
14 branch: foo
15 tag: tip
15 tag: tip
16 parent: 4:4909a3732169
16 parent: 4:4909a3732169
17 parent: 3:bf1bc2f45e83
17 parent: 3:bf1bc2f45e83
18 user: test
18 user: test
19 date: Mon Jan 12 13:46:40 1970 +0000
19 date: Mon Jan 12 13:46:40 1970 +0000
20 summary: merge
20 summary: merge
21
21
22 changeset: 4:4909a3732169
22 changeset: 4:4909a3732169
23 branch: foo
23 branch: foo
24 parent: 1:b699b1cec9c2
24 parent: 1:b699b1cec9c2
25 user: test
25 user: test
26 date: Mon Jan 12 13:46:40 1970 +0000
26 date: Mon Jan 12 13:46:40 1970 +0000
27 summary: modify a branch
27 summary: modify a branch
28
28
29 changeset: 3:bf1bc2f45e83
29 changeset: 3:bf1bc2f45e83
30 user: test
30 user: test
31 date: Mon Jan 12 13:46:40 1970 +0000
31 date: Mon Jan 12 13:46:40 1970 +0000
32 summary: clear branch name
32 summary: clear branch name
33
33
34 changeset: 2:67ec16bde7f1
34 changeset: 2:67ec16bde7f1
35 branch: bar
35 branch: bar
36 user: test
36 user: test
37 date: Mon Jan 12 13:46:40 1970 +0000
37 date: Mon Jan 12 13:46:40 1970 +0000
38 summary: change branch name
38 summary: change branch name
39
39
40 changeset: 1:b699b1cec9c2
40 changeset: 1:b699b1cec9c2
41 branch: foo
41 branch: foo
42 user: test
42 user: test
43 date: Mon Jan 12 13:46:40 1970 +0000
43 date: Mon Jan 12 13:46:40 1970 +0000
44 summary: add branch name
44 summary: add branch name
45
45
46 changeset: 0:be8523e69bf8
46 changeset: 0:be8523e69bf8
47 user: test
47 user: test
48 date: Mon Jan 12 13:46:40 1970 +0000
48 date: Mon Jan 12 13:46:40 1970 +0000
49 summary: initial
49 summary: initial
50
50
51 foo 5:5f8fb06e083e
51 foo 5:5f8fb06e083e
52 default 3:bf1bc2f45e83 (inactive)
52 default 3:bf1bc2f45e83 (inactive)
53 bar 2:67ec16bde7f1 (inactive)
53 bar 2:67ec16bde7f1 (inactive)
54 foo
54 foo
55 default
55 default
56 bar
56 bar
57 % test for invalid branch cache
57 % test for invalid branch cache
58 rolling back last transaction
58 rolling back last transaction
59 changeset: 4:4909a3732169
59 changeset: 4:4909a3732169
60 branch: foo
60 branch: foo
61 tag: tip
61 tag: tip
62 parent: 1:b699b1cec9c2
62 parent: 1:b699b1cec9c2
63 user: test
63 user: test
64 date: Mon Jan 12 13:46:40 1970 +0000
64 date: Mon Jan 12 13:46:40 1970 +0000
65 summary: modify a branch
65 summary: modify a branch
66
66
67 invalidating branch cache (tip differs)
67 invalidating branch cache (tip differs)
68 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
68 changeset: 4:4909a3732169c0c20011c4f4b8fdff4e3d89b23f
69 branch: foo
69 branch: foo
70 tag: tip
70 tag: tip
71 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
71 parent: 1:b699b1cec9c2966b3700de4fef0dc123cd754c31
72 parent: -1:0000000000000000000000000000000000000000
72 parent: -1:0000000000000000000000000000000000000000
73 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
73 manifest: 4:d01b250baaa05909152f7ae07d7a649deea0df9a
74 user: test
74 user: test
75 date: Mon Jan 12 13:46:40 1970 +0000
75 date: Mon Jan 12 13:46:40 1970 +0000
76 files: a
76 files: a
77 extra: branch=foo
77 extra: branch=foo
78 description:
78 description:
79 modify a branch
79 modify a branch
80
80
81
81
82 4:4909a3732169
82 4:4909a3732169
83 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
83 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
84 be8523e69bf892e25817fc97187516b3c0804ae4 default
84 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
85 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
85 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
86 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
86 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
87 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
87 % push should update the branch cache
88 % push should update the branch cache
88 % pushing just rev 0
89 % pushing just rev 0
89 be8523e69bf892e25817fc97187516b3c0804ae4 0
90 be8523e69bf892e25817fc97187516b3c0804ae4 0
90 be8523e69bf892e25817fc97187516b3c0804ae4 default
91 be8523e69bf892e25817fc97187516b3c0804ae4 default
91 % pushing everything
92 % pushing everything
92 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
93 4909a3732169c0c20011c4f4b8fdff4e3d89b23f 4
94 be8523e69bf892e25817fc97187516b3c0804ae4 default
93 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
95 bf1bc2f45e834c75404d0ddab57d53beab56e2f8 default
94 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
96 4909a3732169c0c20011c4f4b8fdff4e3d89b23f foo
95 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
97 67ec16bde7f1575d523313b9bca000f6a6f12dca bar
96 % update with no arguments: tipmost revision of the current branch
98 % update with no arguments: tipmost revision of the current branch
97 bf1bc2f45e83
99 bf1bc2f45e83
98 4909a3732169 (foo) tip
100 4909a3732169 (foo) tip
99 marked working directory as branch foobar
101 marked working directory as branch foobar
100 abort: branch foobar not found
102 abort: branch foobar not found
101 % fastforward merge
103 % fastforward merge
102 marked working directory as branch ff
104 marked working directory as branch ff
103 adding ff
105 adding ff
104 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
106 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
105 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
107 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
106 (branch merge, don't forget to commit)
108 (branch merge, don't forget to commit)
107 foo
109 foo
108 changeset: 6:f0c74f92a385
110 changeset: 6:f0c74f92a385
109 branch: foo
111 branch: foo
110 tag: tip
112 tag: tip
111 parent: 4:4909a3732169
113 parent: 4:4909a3732169
112 parent: 5:c420d2121b71
114 parent: 5:c420d2121b71
113 user: test
115 user: test
114 date: Mon Jan 12 13:46:40 1970 +0000
116 date: Mon Jan 12 13:46:40 1970 +0000
115 summary: Merge ff into foo
117 summary: Merge ff into foo
116
118
117 a
119 a
118 ff
120 ff
119 % test merging, add 3 default heads and one test head
121 % test merging, add 3 default heads and one test head
120 adding a
122 adding a
121 adding b
123 adding b
122 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
124 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
123 adding c
125 adding c
124 created new head
126 created new head
125 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
127 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
126 adding d
128 adding d
127 created new head
129 created new head
128 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
130 0 files updated, 0 files merged, 1 files removed, 0 files unresolved
129 marked working directory as branch test
131 marked working directory as branch test
130 adding e
132 adding e
131 created new head
133 created new head
132 changeset: 4:3a1e01ed1df4
134 changeset: 4:3a1e01ed1df4
133 branch: test
135 branch: test
134 tag: tip
136 tag: tip
135 parent: 0:cb9a9f314b8b
137 parent: 0:cb9a9f314b8b
136 user: test
138 user: test
137 date: Thu Jan 01 00:00:00 1970 +0000
139 date: Thu Jan 01 00:00:00 1970 +0000
138 summary: e
140 summary: e
139
141
140 changeset: 3:980f7dc84c29
142 changeset: 3:980f7dc84c29
141 parent: 0:cb9a9f314b8b
143 parent: 0:cb9a9f314b8b
142 user: test
144 user: test
143 date: Thu Jan 01 00:00:00 1970 +0000
145 date: Thu Jan 01 00:00:00 1970 +0000
144 summary: d
146 summary: d
145
147
146 changeset: 2:d36c0562f908
148 changeset: 2:d36c0562f908
147 parent: 0:cb9a9f314b8b
149 parent: 0:cb9a9f314b8b
148 user: test
150 user: test
149 date: Thu Jan 01 00:00:00 1970 +0000
151 date: Thu Jan 01 00:00:00 1970 +0000
150 summary: c
152 summary: c
151
153
152 changeset: 1:d2ae7f538514
154 changeset: 1:d2ae7f538514
153 user: test
155 user: test
154 date: Thu Jan 01 00:00:00 1970 +0000
156 date: Thu Jan 01 00:00:00 1970 +0000
155 summary: b
157 summary: b
156
158
157 changeset: 0:cb9a9f314b8b
159 changeset: 0:cb9a9f314b8b
158 user: test
160 user: test
159 date: Thu Jan 01 00:00:00 1970 +0000
161 date: Thu Jan 01 00:00:00 1970 +0000
160 summary: a
162 summary: a
161
163
162 % implicit merge with test branch as parent
164 % implicit merge with test branch as parent
163 abort: branch 'test' has one head - please merge with an explicit rev
165 abort: branch 'test' has one head - please merge with an explicit rev
164 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
166 1 files updated, 0 files merged, 1 files removed, 0 files unresolved
165 % implicit merge with default branch as parent
167 % implicit merge with default branch as parent
166 abort: branch 'default' has 3 heads - please merge with an explicit rev
168 abort: branch 'default' has 3 heads - please merge with an explicit rev
167 % 3 branch heads, explicit merge required
169 % 3 branch heads, explicit merge required
168 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
170 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
169 (branch merge, don't forget to commit)
171 (branch merge, don't forget to commit)
170 % 2 branch heads, implicit merge works
172 % 2 branch heads, implicit merge works
171 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
173 1 files updated, 0 files merged, 0 files removed, 0 files unresolved
172 (branch merge, don't forget to commit)
174 (branch merge, don't forget to commit)
General Comments 0
You need to be logged in to leave comments. Login now