##// END OF EJS Templates
Changeset r1883:b98160cf (default branch), by Vadim Gelfer:
"give more info to hgmerge script" — pass repo/file/revision details
to the hgmerge helper via environment variables.
parent child Browse files
Show More
@@ -1,179 +1,183
#!/bin/sh
#
# hgmerge - default merge helper for Mercurial
#
# This tries to find a way to do three-way merge on the current system.
# The result ought to end up in $1.
#
# Environment variables set by Mercurial:
# HG_ROOT       repo root
# HG_FILE       name of file within repo
# HG_MY_NODE    revision being merged
# HG_OTHER_NODE revision being merged

set -e # bail out quickly on failure

# positional arguments: local (merge target), base (common ancestor), other
LOCAL="$1"
BASE="$2"
OTHER="$3"

if [ -z "$EDITOR" ]; then
    EDITOR="vi"
fi

# find decent versions of our utilities, insisting on the GNU versions where we
# need to
MERGE="merge"
DIFF3="gdiff3"
DIFF="gdiff"
PATCH="gpatch"

# a variable is emptied when its tool is unusable; later stages test -n
type "$MERGE" >/dev/null 2>&1 || MERGE=
type "$DIFF3" >/dev/null 2>&1 || DIFF3="diff3"
$DIFF3 --version >/dev/null 2>&1 || DIFF3=
type "$DIFF" >/dev/null 2>&1 || DIFF="diff"
type "$DIFF" >/dev/null 2>&1 || DIFF=
type "$PATCH" >/dev/null 2>&1 || PATCH="patch"
type "$PATCH" >/dev/null 2>&1 || PATCH=

# find optional visual utilities
FILEMERGE="/Developer/Applications/Utilities/FileMerge.app/Contents/MacOS/FileMerge"
KDIFF3="kdiff3"
TKDIFF="tkdiff"
MELD="meld"

type "$FILEMERGE" >/dev/null 2>&1 || FILEMERGE=
type "$KDIFF3" >/dev/null 2>&1 || KDIFF3=
type "$TKDIFF" >/dev/null 2>&1 || TKDIFF=
type "$MELD" >/dev/null 2>&1 || MELD=

# random part of names
RAND="$RANDOM$RANDOM"

# temporary directory for diff+patch merge
HGTMP="${TMPDIR-/tmp}/hgmerge.$RAND"

# backup file
BACKUP="$LOCAL.orig.$RAND"

# file used to test for file change
CHGTEST="$LOCAL.chg.$RAND"

# put all your required cleanup here
cleanup() {
    rm -f "$BACKUP" "$CHGTEST"
    rm -rf "$HGTMP"
}

# functions concerning program exit
success() {
    cleanup
    exit 0
}

# restore the original file from the backup before bailing out
failure() {
    echo "merge failed" 1>&2
    mv "$BACKUP" "$LOCAL"
    cleanup
    exit 1
}

# Ask if the merge was successful
ask_if_merged() {
    while true; do
        echo "$LOCAL seems unchanged."
        echo "Was the merge successful? [y/n]"
        read answer
        case "$answer" in
            y*|Y*) success;;
            n*|N*) failure;;
        esac
    done
}

# Clean up when interrupted
trap "failure" 1 2 3 6 15 # HUP INT QUIT ABRT TERM

# Back up our file (and try hard to keep the mtime unchanged)
mv "$LOCAL" "$BACKUP"
cp "$BACKUP" "$LOCAL"

# Attempt to do a non-interactive merge
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    if [ -n "$MERGE" ]; then
        $MERGE "$LOCAL" "$BASE" "$OTHER" 2> /dev/null && success
    elif [ -n "$DIFF3" ]; then
        $DIFF3 -m "$BACKUP" "$BASE" "$OTHER" > "$LOCAL" && success
    fi
    # exit status > 1 means real trouble, not just conflicts
    if [ $? -gt 1 ]; then
        echo "automatic merge failed! Exiting." 1>&2
        failure
    fi
fi

# on MacOS X try FileMerge.app, shipped with Apple's developer tools
if [ -n "$FILEMERGE" ]; then
    cp "$BACKUP" "$LOCAL"
    cp "$BACKUP" "$CHGTEST"
    # filemerge prefers the right by default
    $FILEMERGE -left "$OTHER" -right "$LOCAL" -ancestor "$BASE" -merge "$LOCAL"
    [ $? -ne 0 ] && echo "FileMerge failed to launch" && failure
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

if [ -n "$DISPLAY" ]; then
    # try using kdiff3, which is fairly nice
    if [ -n "$KDIFF3" ]; then
        $KDIFF3 --auto "$BASE" "$BACKUP" "$OTHER" -o "$LOCAL" || failure
        success
    fi

    # try using tkdiff, which is a bit less sophisticated
    if [ -n "$TKDIFF" ]; then
        $TKDIFF "$BACKUP" "$OTHER" -a "$BASE" -o "$LOCAL" || failure
        success
    fi

    if [ -n "$MELD" ]; then
        cp "$BACKUP" "$CHGTEST"
        # protect our feet - meld allows us to save to the left file
        cp "$BACKUP" "$LOCAL.tmp.$RAND"
        # Meld doesn't have automatic merging, so to reduce intervention
        # use the file with conflicts
        $MELD "$LOCAL.tmp.$RAND" "$LOCAL" "$OTHER" || failure
        # Also it doesn't return good error code
        test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
    fi
fi

# Attempt to do a merge with $EDITOR
if [ -n "$MERGE" -o -n "$DIFF3" ]; then
    echo "conflicts detected in $LOCAL"
    cp "$BACKUP" "$CHGTEST"
    $EDITOR "$LOCAL" || failure
    # Some editors do not return meaningful error codes
    # Do not take any chances
    test "$LOCAL" -nt "$CHGTEST" && success || ask_if_merged
fi

# attempt to manually merge with diff and patch
if [ -n "$DIFF" -a -n "$PATCH" ]; then

    (umask 077 && mkdir "$HGTMP") || {
        echo "Could not create temporary directory $HGTMP" 1>&2
        failure
    }

    $DIFF -u "$BASE" "$OTHER" > "$HGTMP/diff" || :
    if $PATCH "$LOCAL" < "$HGTMP/diff"; then
        success
    else
        # If rejects are empty after using the editor, merge was ok
        $EDITOR "$LOCAL" "$LOCAL.rej" || failure
        test -s "$LOCAL.rej" || success
    fi
    failure
fi

echo
echo "hgmerge: unable to find any merge utility!"
echo "supported programs:"
echo "merge, FileMerge, tkdiff, kdiff3, meld, diff+patch"
echo
failure
@@ -1,1887 +1,1896
1 # localrepo.py - read/write repository class for mercurial
1 # localrepo.py - read/write repository class for mercurial
2 #
2 #
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
3 # Copyright 2005 Matt Mackall <mpm@selenic.com>
4 #
4 #
5 # This software may be used and distributed according to the terms
5 # This software may be used and distributed according to the terms
6 # of the GNU General Public License, incorporated herein by reference.
6 # of the GNU General Public License, incorporated herein by reference.
7
7
8 import struct, os, util
8 import struct, os, util
9 import filelog, manifest, changelog, dirstate, repo
9 import filelog, manifest, changelog, dirstate, repo
10 from node import *
10 from node import *
11 from i18n import gettext as _
11 from i18n import gettext as _
12 from demandload import *
12 from demandload import *
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
13 demandload(globals(), "re lock transaction tempfile stat mdiff errno ui")
14
14
15 class localrepository(object):
15 class localrepository(object):
16 def __del__(self):
16 def __del__(self):
17 self.transhandle = None
17 self.transhandle = None
18 def __init__(self, parentui, path=None, create=0):
18 def __init__(self, parentui, path=None, create=0):
19 if not path:
19 if not path:
20 p = os.getcwd()
20 p = os.getcwd()
21 while not os.path.isdir(os.path.join(p, ".hg")):
21 while not os.path.isdir(os.path.join(p, ".hg")):
22 oldp = p
22 oldp = p
23 p = os.path.dirname(p)
23 p = os.path.dirname(p)
24 if p == oldp:
24 if p == oldp:
25 raise repo.RepoError(_("no repo found"))
25 raise repo.RepoError(_("no repo found"))
26 path = p
26 path = p
27 self.path = os.path.join(path, ".hg")
27 self.path = os.path.join(path, ".hg")
28
28
29 if not create and not os.path.isdir(self.path):
29 if not create and not os.path.isdir(self.path):
30 raise repo.RepoError(_("repository %s not found") % path)
30 raise repo.RepoError(_("repository %s not found") % path)
31
31
32 self.root = os.path.abspath(path)
32 self.root = os.path.abspath(path)
33 self.ui = ui.ui(parentui=parentui)
33 self.ui = ui.ui(parentui=parentui)
34 self.opener = util.opener(self.path)
34 self.opener = util.opener(self.path)
35 self.wopener = util.opener(self.root)
35 self.wopener = util.opener(self.root)
36 self.manifest = manifest.manifest(self.opener)
36 self.manifest = manifest.manifest(self.opener)
37 self.changelog = changelog.changelog(self.opener)
37 self.changelog = changelog.changelog(self.opener)
38 self.tagscache = None
38 self.tagscache = None
39 self.nodetagscache = None
39 self.nodetagscache = None
40 self.encodepats = None
40 self.encodepats = None
41 self.decodepats = None
41 self.decodepats = None
42 self.transhandle = None
42 self.transhandle = None
43
43
44 if create:
44 if create:
45 os.mkdir(self.path)
45 os.mkdir(self.path)
46 os.mkdir(self.join("data"))
46 os.mkdir(self.join("data"))
47
47
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
48 self.dirstate = dirstate.dirstate(self.opener, self.ui, self.root)
49 try:
49 try:
50 self.ui.readconfig(self.join("hgrc"))
50 self.ui.readconfig(self.join("hgrc"))
51 except IOError:
51 except IOError:
52 pass
52 pass
53
53
54 def hook(self, name, throw=False, **args):
54 def hook(self, name, throw=False, **args):
55 def runhook(name, cmd):
55 def runhook(name, cmd):
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
56 self.ui.note(_("running hook %s: %s\n") % (name, cmd))
57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
57 env = dict([('HG_' + k.upper(), v) for k, v in args.iteritems()])
58 r = util.system(cmd, environ=env, cwd=self.root)
58 r = util.system(cmd, environ=env, cwd=self.root)
59 if r:
59 if r:
60 desc, r = util.explain_exit(r)
60 desc, r = util.explain_exit(r)
61 if throw:
61 if throw:
62 raise util.Abort(_('%s hook %s') % (name, desc))
62 raise util.Abort(_('%s hook %s') % (name, desc))
63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
63 self.ui.warn(_('error: %s hook %s\n') % (name, desc))
64 return False
64 return False
65 return True
65 return True
66
66
67 r = True
67 r = True
68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
68 hooks = [(hname, cmd) for hname, cmd in self.ui.configitems("hooks")
69 if hname.split(".", 1)[0] == name and cmd]
69 if hname.split(".", 1)[0] == name and cmd]
70 hooks.sort()
70 hooks.sort()
71 for hname, cmd in hooks:
71 for hname, cmd in hooks:
72 r = runhook(hname, cmd) and r
72 r = runhook(hname, cmd) and r
73 return r
73 return r
74
74
75 def tags(self):
75 def tags(self):
76 '''return a mapping of tag to node'''
76 '''return a mapping of tag to node'''
77 if not self.tagscache:
77 if not self.tagscache:
78 self.tagscache = {}
78 self.tagscache = {}
79 def addtag(self, k, n):
79 def addtag(self, k, n):
80 try:
80 try:
81 bin_n = bin(n)
81 bin_n = bin(n)
82 except TypeError:
82 except TypeError:
83 bin_n = ''
83 bin_n = ''
84 self.tagscache[k.strip()] = bin_n
84 self.tagscache[k.strip()] = bin_n
85
85
86 try:
86 try:
87 # read each head of the tags file, ending with the tip
87 # read each head of the tags file, ending with the tip
88 # and add each tag found to the map, with "newer" ones
88 # and add each tag found to the map, with "newer" ones
89 # taking precedence
89 # taking precedence
90 fl = self.file(".hgtags")
90 fl = self.file(".hgtags")
91 h = fl.heads()
91 h = fl.heads()
92 h.reverse()
92 h.reverse()
93 for r in h:
93 for r in h:
94 for l in fl.read(r).splitlines():
94 for l in fl.read(r).splitlines():
95 if l:
95 if l:
96 n, k = l.split(" ", 1)
96 n, k = l.split(" ", 1)
97 addtag(self, k, n)
97 addtag(self, k, n)
98 except KeyError:
98 except KeyError:
99 pass
99 pass
100
100
101 try:
101 try:
102 f = self.opener("localtags")
102 f = self.opener("localtags")
103 for l in f:
103 for l in f:
104 n, k = l.split(" ", 1)
104 n, k = l.split(" ", 1)
105 addtag(self, k, n)
105 addtag(self, k, n)
106 except IOError:
106 except IOError:
107 pass
107 pass
108
108
109 self.tagscache['tip'] = self.changelog.tip()
109 self.tagscache['tip'] = self.changelog.tip()
110
110
111 return self.tagscache
111 return self.tagscache
112
112
113 def tagslist(self):
113 def tagslist(self):
114 '''return a list of tags ordered by revision'''
114 '''return a list of tags ordered by revision'''
115 l = []
115 l = []
116 for t, n in self.tags().items():
116 for t, n in self.tags().items():
117 try:
117 try:
118 r = self.changelog.rev(n)
118 r = self.changelog.rev(n)
119 except:
119 except:
120 r = -2 # sort to the beginning of the list if unknown
120 r = -2 # sort to the beginning of the list if unknown
121 l.append((r, t, n))
121 l.append((r, t, n))
122 l.sort()
122 l.sort()
123 return [(t, n) for r, t, n in l]
123 return [(t, n) for r, t, n in l]
124
124
125 def nodetags(self, node):
125 def nodetags(self, node):
126 '''return the tags associated with a node'''
126 '''return the tags associated with a node'''
127 if not self.nodetagscache:
127 if not self.nodetagscache:
128 self.nodetagscache = {}
128 self.nodetagscache = {}
129 for t, n in self.tags().items():
129 for t, n in self.tags().items():
130 self.nodetagscache.setdefault(n, []).append(t)
130 self.nodetagscache.setdefault(n, []).append(t)
131 return self.nodetagscache.get(node, [])
131 return self.nodetagscache.get(node, [])
132
132
133 def lookup(self, key):
133 def lookup(self, key):
134 try:
134 try:
135 return self.tags()[key]
135 return self.tags()[key]
136 except KeyError:
136 except KeyError:
137 try:
137 try:
138 return self.changelog.lookup(key)
138 return self.changelog.lookup(key)
139 except:
139 except:
140 raise repo.RepoError(_("unknown revision '%s'") % key)
140 raise repo.RepoError(_("unknown revision '%s'") % key)
141
141
142 def dev(self):
142 def dev(self):
143 return os.stat(self.path).st_dev
143 return os.stat(self.path).st_dev
144
144
145 def local(self):
145 def local(self):
146 return True
146 return True
147
147
148 def join(self, f):
148 def join(self, f):
149 return os.path.join(self.path, f)
149 return os.path.join(self.path, f)
150
150
151 def wjoin(self, f):
151 def wjoin(self, f):
152 return os.path.join(self.root, f)
152 return os.path.join(self.root, f)
153
153
154 def file(self, f):
154 def file(self, f):
155 if f[0] == '/':
155 if f[0] == '/':
156 f = f[1:]
156 f = f[1:]
157 return filelog.filelog(self.opener, f)
157 return filelog.filelog(self.opener, f)
158
158
159 def getcwd(self):
159 def getcwd(self):
160 return self.dirstate.getcwd()
160 return self.dirstate.getcwd()
161
161
162 def wfile(self, f, mode='r'):
162 def wfile(self, f, mode='r'):
163 return self.wopener(f, mode)
163 return self.wopener(f, mode)
164
164
165 def wread(self, filename):
165 def wread(self, filename):
166 if self.encodepats == None:
166 if self.encodepats == None:
167 l = []
167 l = []
168 for pat, cmd in self.ui.configitems("encode"):
168 for pat, cmd in self.ui.configitems("encode"):
169 mf = util.matcher("", "/", [pat], [], [])[1]
169 mf = util.matcher("", "/", [pat], [], [])[1]
170 l.append((mf, cmd))
170 l.append((mf, cmd))
171 self.encodepats = l
171 self.encodepats = l
172
172
173 data = self.wopener(filename, 'r').read()
173 data = self.wopener(filename, 'r').read()
174
174
175 for mf, cmd in self.encodepats:
175 for mf, cmd in self.encodepats:
176 if mf(filename):
176 if mf(filename):
177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
177 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
178 data = util.filter(data, cmd)
178 data = util.filter(data, cmd)
179 break
179 break
180
180
181 return data
181 return data
182
182
183 def wwrite(self, filename, data, fd=None):
183 def wwrite(self, filename, data, fd=None):
184 if self.decodepats == None:
184 if self.decodepats == None:
185 l = []
185 l = []
186 for pat, cmd in self.ui.configitems("decode"):
186 for pat, cmd in self.ui.configitems("decode"):
187 mf = util.matcher("", "/", [pat], [], [])[1]
187 mf = util.matcher("", "/", [pat], [], [])[1]
188 l.append((mf, cmd))
188 l.append((mf, cmd))
189 self.decodepats = l
189 self.decodepats = l
190
190
191 for mf, cmd in self.decodepats:
191 for mf, cmd in self.decodepats:
192 if mf(filename):
192 if mf(filename):
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
193 self.ui.debug(_("filtering %s through %s\n") % (filename, cmd))
194 data = util.filter(data, cmd)
194 data = util.filter(data, cmd)
195 break
195 break
196
196
197 if fd:
197 if fd:
198 return fd.write(data)
198 return fd.write(data)
199 return self.wopener(filename, 'w').write(data)
199 return self.wopener(filename, 'w').write(data)
200
200
201 def transaction(self):
201 def transaction(self):
202 tr = self.transhandle
202 tr = self.transhandle
203 if tr != None and tr.running():
203 if tr != None and tr.running():
204 return tr.nest()
204 return tr.nest()
205
205
206 # save dirstate for undo
206 # save dirstate for undo
207 try:
207 try:
208 ds = self.opener("dirstate").read()
208 ds = self.opener("dirstate").read()
209 except IOError:
209 except IOError:
210 ds = ""
210 ds = ""
211 self.opener("journal.dirstate", "w").write(ds)
211 self.opener("journal.dirstate", "w").write(ds)
212
212
213 tr = transaction.transaction(self.ui.warn, self.opener,
213 tr = transaction.transaction(self.ui.warn, self.opener,
214 self.join("journal"),
214 self.join("journal"),
215 aftertrans(self.path))
215 aftertrans(self.path))
216 self.transhandle = tr
216 self.transhandle = tr
217 return tr
217 return tr
218
218
219 def recover(self):
219 def recover(self):
220 l = self.lock()
220 l = self.lock()
221 if os.path.exists(self.join("journal")):
221 if os.path.exists(self.join("journal")):
222 self.ui.status(_("rolling back interrupted transaction\n"))
222 self.ui.status(_("rolling back interrupted transaction\n"))
223 transaction.rollback(self.opener, self.join("journal"))
223 transaction.rollback(self.opener, self.join("journal"))
224 self.reload()
224 self.reload()
225 return True
225 return True
226 else:
226 else:
227 self.ui.warn(_("no interrupted transaction available\n"))
227 self.ui.warn(_("no interrupted transaction available\n"))
228 return False
228 return False
229
229
230 def undo(self, wlock=None):
230 def undo(self, wlock=None):
231 if not wlock:
231 if not wlock:
232 wlock = self.wlock()
232 wlock = self.wlock()
233 l = self.lock()
233 l = self.lock()
234 if os.path.exists(self.join("undo")):
234 if os.path.exists(self.join("undo")):
235 self.ui.status(_("rolling back last transaction\n"))
235 self.ui.status(_("rolling back last transaction\n"))
236 transaction.rollback(self.opener, self.join("undo"))
236 transaction.rollback(self.opener, self.join("undo"))
237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
237 util.rename(self.join("undo.dirstate"), self.join("dirstate"))
238 self.reload()
238 self.reload()
239 self.wreload()
239 self.wreload()
240 else:
240 else:
241 self.ui.warn(_("no undo information available\n"))
241 self.ui.warn(_("no undo information available\n"))
242
242
243 def wreload(self):
243 def wreload(self):
244 self.dirstate.read()
244 self.dirstate.read()
245
245
246 def reload(self):
246 def reload(self):
247 self.changelog.load()
247 self.changelog.load()
248 self.manifest.load()
248 self.manifest.load()
249 self.tagscache = None
249 self.tagscache = None
250 self.nodetagscache = None
250 self.nodetagscache = None
251
251
252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
252 def do_lock(self, lockname, wait, releasefn=None, acquirefn=None):
253 try:
253 try:
254 l = lock.lock(self.join(lockname), 0, releasefn)
254 l = lock.lock(self.join(lockname), 0, releasefn)
255 except lock.LockHeld, inst:
255 except lock.LockHeld, inst:
256 if not wait:
256 if not wait:
257 raise inst
257 raise inst
258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
258 self.ui.warn(_("waiting for lock held by %s\n") % inst.args[0])
259 try:
259 try:
260 # default to 600 seconds timeout
260 # default to 600 seconds timeout
261 l = lock.lock(self.join(lockname),
261 l = lock.lock(self.join(lockname),
262 int(self.ui.config("ui", "timeout") or 600),
262 int(self.ui.config("ui", "timeout") or 600),
263 releasefn)
263 releasefn)
264 except lock.LockHeld, inst:
264 except lock.LockHeld, inst:
265 raise util.Abort(_("timeout while waiting for "
265 raise util.Abort(_("timeout while waiting for "
266 "lock held by %s") % inst.args[0])
266 "lock held by %s") % inst.args[0])
267 if acquirefn:
267 if acquirefn:
268 acquirefn()
268 acquirefn()
269 return l
269 return l
270
270
271 def lock(self, wait=1):
271 def lock(self, wait=1):
272 return self.do_lock("lock", wait, acquirefn=self.reload)
272 return self.do_lock("lock", wait, acquirefn=self.reload)
273
273
274 def wlock(self, wait=1):
274 def wlock(self, wait=1):
275 return self.do_lock("wlock", wait,
275 return self.do_lock("wlock", wait,
276 self.dirstate.write,
276 self.dirstate.write,
277 self.wreload)
277 self.wreload)
278
278
279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
279 def checkfilemerge(self, filename, text, filelog, manifest1, manifest2):
280 "determine whether a new filenode is needed"
280 "determine whether a new filenode is needed"
281 fp1 = manifest1.get(filename, nullid)
281 fp1 = manifest1.get(filename, nullid)
282 fp2 = manifest2.get(filename, nullid)
282 fp2 = manifest2.get(filename, nullid)
283
283
284 if fp2 != nullid:
284 if fp2 != nullid:
285 # is one parent an ancestor of the other?
285 # is one parent an ancestor of the other?
286 fpa = filelog.ancestor(fp1, fp2)
286 fpa = filelog.ancestor(fp1, fp2)
287 if fpa == fp1:
287 if fpa == fp1:
288 fp1, fp2 = fp2, nullid
288 fp1, fp2 = fp2, nullid
289 elif fpa == fp2:
289 elif fpa == fp2:
290 fp2 = nullid
290 fp2 = nullid
291
291
292 # is the file unmodified from the parent? report existing entry
292 # is the file unmodified from the parent? report existing entry
293 if fp2 == nullid and text == filelog.read(fp1):
293 if fp2 == nullid and text == filelog.read(fp1):
294 return (fp1, None, None)
294 return (fp1, None, None)
295
295
296 return (None, fp1, fp2)
296 return (None, fp1, fp2)
297
297
298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
298 def rawcommit(self, files, text, user, date, p1=None, p2=None, wlock=None):
299 orig_parent = self.dirstate.parents()[0] or nullid
299 orig_parent = self.dirstate.parents()[0] or nullid
300 p1 = p1 or self.dirstate.parents()[0] or nullid
300 p1 = p1 or self.dirstate.parents()[0] or nullid
301 p2 = p2 or self.dirstate.parents()[1] or nullid
301 p2 = p2 or self.dirstate.parents()[1] or nullid
302 c1 = self.changelog.read(p1)
302 c1 = self.changelog.read(p1)
303 c2 = self.changelog.read(p2)
303 c2 = self.changelog.read(p2)
304 m1 = self.manifest.read(c1[0])
304 m1 = self.manifest.read(c1[0])
305 mf1 = self.manifest.readflags(c1[0])
305 mf1 = self.manifest.readflags(c1[0])
306 m2 = self.manifest.read(c2[0])
306 m2 = self.manifest.read(c2[0])
307 changed = []
307 changed = []
308
308
309 if orig_parent == p1:
309 if orig_parent == p1:
310 update_dirstate = 1
310 update_dirstate = 1
311 else:
311 else:
312 update_dirstate = 0
312 update_dirstate = 0
313
313
314 if not wlock:
314 if not wlock:
315 wlock = self.wlock()
315 wlock = self.wlock()
316 l = self.lock()
316 l = self.lock()
317 tr = self.transaction()
317 tr = self.transaction()
318 mm = m1.copy()
318 mm = m1.copy()
319 mfm = mf1.copy()
319 mfm = mf1.copy()
320 linkrev = self.changelog.count()
320 linkrev = self.changelog.count()
321 for f in files:
321 for f in files:
322 try:
322 try:
323 t = self.wread(f)
323 t = self.wread(f)
324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
324 tm = util.is_exec(self.wjoin(f), mfm.get(f, False))
325 r = self.file(f)
325 r = self.file(f)
326 mfm[f] = tm
326 mfm[f] = tm
327
327
328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
328 (entry, fp1, fp2) = self.checkfilemerge(f, t, r, m1, m2)
329 if entry:
329 if entry:
330 mm[f] = entry
330 mm[f] = entry
331 continue
331 continue
332
332
333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
333 mm[f] = r.add(t, {}, tr, linkrev, fp1, fp2)
334 changed.append(f)
334 changed.append(f)
335 if update_dirstate:
335 if update_dirstate:
336 self.dirstate.update([f], "n")
336 self.dirstate.update([f], "n")
337 except IOError:
337 except IOError:
338 try:
338 try:
339 del mm[f]
339 del mm[f]
340 del mfm[f]
340 del mfm[f]
341 if update_dirstate:
341 if update_dirstate:
342 self.dirstate.forget([f])
342 self.dirstate.forget([f])
343 except:
343 except:
344 # deleted from p2?
344 # deleted from p2?
345 pass
345 pass
346
346
347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
347 mnode = self.manifest.add(mm, mfm, tr, linkrev, c1[0], c2[0])
348 user = user or self.ui.username()
348 user = user or self.ui.username()
349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
349 n = self.changelog.add(mnode, changed, text, tr, p1, p2, user, date)
350 tr.close()
350 tr.close()
351 if update_dirstate:
351 if update_dirstate:
352 self.dirstate.setparents(n, nullid)
352 self.dirstate.setparents(n, nullid)
353
353
354 def commit(self, files=None, text="", user=None, date=None,
354 def commit(self, files=None, text="", user=None, date=None,
355 match=util.always, force=False, lock=None, wlock=None):
355 match=util.always, force=False, lock=None, wlock=None):
356 commit = []
356 commit = []
357 remove = []
357 remove = []
358 changed = []
358 changed = []
359
359
360 if files:
360 if files:
361 for f in files:
361 for f in files:
362 s = self.dirstate.state(f)
362 s = self.dirstate.state(f)
363 if s in 'nmai':
363 if s in 'nmai':
364 commit.append(f)
364 commit.append(f)
365 elif s == 'r':
365 elif s == 'r':
366 remove.append(f)
366 remove.append(f)
367 else:
367 else:
368 self.ui.warn(_("%s not tracked!\n") % f)
368 self.ui.warn(_("%s not tracked!\n") % f)
369 else:
369 else:
370 modified, added, removed, deleted, unknown = self.changes(match=match)
370 modified, added, removed, deleted, unknown = self.changes(match=match)
371 commit = modified + added
371 commit = modified + added
372 remove = removed
372 remove = removed
373
373
374 p1, p2 = self.dirstate.parents()
374 p1, p2 = self.dirstate.parents()
375 c1 = self.changelog.read(p1)
375 c1 = self.changelog.read(p1)
376 c2 = self.changelog.read(p2)
376 c2 = self.changelog.read(p2)
377 m1 = self.manifest.read(c1[0])
377 m1 = self.manifest.read(c1[0])
378 mf1 = self.manifest.readflags(c1[0])
378 mf1 = self.manifest.readflags(c1[0])
379 m2 = self.manifest.read(c2[0])
379 m2 = self.manifest.read(c2[0])
380
380
381 if not commit and not remove and not force and p2 == nullid:
381 if not commit and not remove and not force and p2 == nullid:
382 self.ui.status(_("nothing changed\n"))
382 self.ui.status(_("nothing changed\n"))
383 return None
383 return None
384
384
385 xp1 = hex(p1)
385 xp1 = hex(p1)
386 if p2 == nullid: xp2 = ''
386 if p2 == nullid: xp2 = ''
387 else: xp2 = hex(p2)
387 else: xp2 = hex(p2)
388
388
389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
389 self.hook("precommit", throw=True, parent1=xp1, parent2=xp2)
390
390
391 if not wlock:
391 if not wlock:
392 wlock = self.wlock()
392 wlock = self.wlock()
393 if not lock:
393 if not lock:
394 lock = self.lock()
394 lock = self.lock()
395 tr = self.transaction()
395 tr = self.transaction()
396
396
397 # check in files
397 # check in files
398 new = {}
398 new = {}
399 linkrev = self.changelog.count()
399 linkrev = self.changelog.count()
400 commit.sort()
400 commit.sort()
401 for f in commit:
401 for f in commit:
402 self.ui.note(f + "\n")
402 self.ui.note(f + "\n")
403 try:
403 try:
404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
404 mf1[f] = util.is_exec(self.wjoin(f), mf1.get(f, False))
405 t = self.wread(f)
405 t = self.wread(f)
406 except IOError:
406 except IOError:
407 self.ui.warn(_("trouble committing %s!\n") % f)
407 self.ui.warn(_("trouble committing %s!\n") % f)
408 raise
408 raise
409
409
410 r = self.file(f)
410 r = self.file(f)
411
411
412 meta = {}
412 meta = {}
413 cp = self.dirstate.copied(f)
413 cp = self.dirstate.copied(f)
414 if cp:
414 if cp:
415 meta["copy"] = cp
415 meta["copy"] = cp
416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
416 meta["copyrev"] = hex(m1.get(cp, m2.get(cp, nullid)))
417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
417 self.ui.debug(_(" %s: copy %s:%s\n") % (f, cp, meta["copyrev"]))
418 fp1, fp2 = nullid, nullid
418 fp1, fp2 = nullid, nullid
419 else:
419 else:
420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
420 entry, fp1, fp2 = self.checkfilemerge(f, t, r, m1, m2)
421 if entry:
421 if entry:
422 new[f] = entry
422 new[f] = entry
423 continue
423 continue
424
424
425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
425 new[f] = r.add(t, meta, tr, linkrev, fp1, fp2)
426 # remember what we've added so that we can later calculate
426 # remember what we've added so that we can later calculate
427 # the files to pull from a set of changesets
427 # the files to pull from a set of changesets
428 changed.append(f)
428 changed.append(f)
429
429
430 # update manifest
430 # update manifest
431 m1 = m1.copy()
431 m1 = m1.copy()
432 m1.update(new)
432 m1.update(new)
433 for f in remove:
433 for f in remove:
434 if f in m1:
434 if f in m1:
435 del m1[f]
435 del m1[f]
436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
436 mn = self.manifest.add(m1, mf1, tr, linkrev, c1[0], c2[0],
437 (new, remove))
437 (new, remove))
438
438
439 # add changeset
439 # add changeset
440 new = new.keys()
440 new = new.keys()
441 new.sort()
441 new.sort()
442
442
443 if not text:
443 if not text:
444 edittext = [""]
444 edittext = [""]
445 if p2 != nullid:
445 if p2 != nullid:
446 edittext.append("HG: branch merge")
446 edittext.append("HG: branch merge")
447 edittext.extend(["HG: changed %s" % f for f in changed])
447 edittext.extend(["HG: changed %s" % f for f in changed])
448 edittext.extend(["HG: removed %s" % f for f in remove])
448 edittext.extend(["HG: removed %s" % f for f in remove])
449 if not changed and not remove:
449 if not changed and not remove:
450 edittext.append("HG: no files changed")
450 edittext.append("HG: no files changed")
451 edittext.append("")
451 edittext.append("")
452 # run editor in the repository root
452 # run editor in the repository root
453 olddir = os.getcwd()
453 olddir = os.getcwd()
454 os.chdir(self.root)
454 os.chdir(self.root)
455 edittext = self.ui.edit("\n".join(edittext))
455 edittext = self.ui.edit("\n".join(edittext))
456 os.chdir(olddir)
456 os.chdir(olddir)
457 if not edittext.rstrip():
457 if not edittext.rstrip():
458 return None
458 return None
459 text = edittext
459 text = edittext
460
460
461 user = user or self.ui.username()
461 user = user or self.ui.username()
462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
462 n = self.changelog.add(mn, changed + remove, text, tr, p1, p2, user, date)
463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
463 self.hook('pretxncommit', throw=True, node=hex(n), parent1=xp1,
464 parent2=xp2)
464 parent2=xp2)
465 tr.close()
465 tr.close()
466
466
467 self.dirstate.setparents(n)
467 self.dirstate.setparents(n)
468 self.dirstate.update(new, "n")
468 self.dirstate.update(new, "n")
469 self.dirstate.forget(remove)
469 self.dirstate.forget(remove)
470
470
471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
471 self.hook("commit", node=hex(n), parent1=xp1, parent2=xp2)
472 return n
472 return n
473
473
474 def walk(self, node=None, files=[], match=util.always):
474 def walk(self, node=None, files=[], match=util.always):
475 if node:
475 if node:
476 fdict = dict.fromkeys(files)
476 fdict = dict.fromkeys(files)
477 for fn in self.manifest.read(self.changelog.read(node)[0]):
477 for fn in self.manifest.read(self.changelog.read(node)[0]):
478 fdict.pop(fn, None)
478 fdict.pop(fn, None)
479 if match(fn):
479 if match(fn):
480 yield 'm', fn
480 yield 'm', fn
481 for fn in fdict:
481 for fn in fdict:
482 self.ui.warn(_('%s: No such file in rev %s\n') % (
482 self.ui.warn(_('%s: No such file in rev %s\n') % (
483 util.pathto(self.getcwd(), fn), short(node)))
483 util.pathto(self.getcwd(), fn), short(node)))
484 else:
484 else:
485 for src, fn in self.dirstate.walk(files, match):
485 for src, fn in self.dirstate.walk(files, match):
486 yield src, fn
486 yield src, fn
487
487
488 def changes(self, node1=None, node2=None, files=[], match=util.always,
488 def changes(self, node1=None, node2=None, files=[], match=util.always,
489 wlock=None):
489 wlock=None):
490 """return changes between two nodes or node and working directory
490 """return changes between two nodes or node and working directory
491
491
492 If node1 is None, use the first dirstate parent instead.
492 If node1 is None, use the first dirstate parent instead.
493 If node2 is None, compare node1 with working directory.
493 If node2 is None, compare node1 with working directory.
494 """
494 """
495
495
496 def fcmp(fn, mf):
496 def fcmp(fn, mf):
497 t1 = self.wread(fn)
497 t1 = self.wread(fn)
498 t2 = self.file(fn).read(mf.get(fn, nullid))
498 t2 = self.file(fn).read(mf.get(fn, nullid))
499 return cmp(t1, t2)
499 return cmp(t1, t2)
500
500
501 def mfmatches(node):
501 def mfmatches(node):
502 change = self.changelog.read(node)
502 change = self.changelog.read(node)
503 mf = dict(self.manifest.read(change[0]))
503 mf = dict(self.manifest.read(change[0]))
504 for fn in mf.keys():
504 for fn in mf.keys():
505 if not match(fn):
505 if not match(fn):
506 del mf[fn]
506 del mf[fn]
507 return mf
507 return mf
508
508
509 if node1:
509 if node1:
510 # read the manifest from node1 before the manifest from node2,
510 # read the manifest from node1 before the manifest from node2,
511 # so that we'll hit the manifest cache if we're going through
511 # so that we'll hit the manifest cache if we're going through
512 # all the revisions in parent->child order.
512 # all the revisions in parent->child order.
513 mf1 = mfmatches(node1)
513 mf1 = mfmatches(node1)
514
514
515 # are we comparing the working directory?
515 # are we comparing the working directory?
516 if not node2:
516 if not node2:
517 if not wlock:
517 if not wlock:
518 try:
518 try:
519 wlock = self.wlock(wait=0)
519 wlock = self.wlock(wait=0)
520 except lock.LockException:
520 except lock.LockException:
521 wlock = None
521 wlock = None
522 lookup, modified, added, removed, deleted, unknown = (
522 lookup, modified, added, removed, deleted, unknown = (
523 self.dirstate.changes(files, match))
523 self.dirstate.changes(files, match))
524
524
525 # are we comparing working dir against its parent?
525 # are we comparing working dir against its parent?
526 if not node1:
526 if not node1:
527 if lookup:
527 if lookup:
528 # do a full compare of any files that might have changed
528 # do a full compare of any files that might have changed
529 mf2 = mfmatches(self.dirstate.parents()[0])
529 mf2 = mfmatches(self.dirstate.parents()[0])
530 for f in lookup:
530 for f in lookup:
531 if fcmp(f, mf2):
531 if fcmp(f, mf2):
532 modified.append(f)
532 modified.append(f)
533 elif wlock is not None:
533 elif wlock is not None:
534 self.dirstate.update([f], "n")
534 self.dirstate.update([f], "n")
535 else:
535 else:
536 # we are comparing working dir against non-parent
536 # we are comparing working dir against non-parent
537 # generate a pseudo-manifest for the working dir
537 # generate a pseudo-manifest for the working dir
538 mf2 = mfmatches(self.dirstate.parents()[0])
538 mf2 = mfmatches(self.dirstate.parents()[0])
539 for f in lookup + modified + added:
539 for f in lookup + modified + added:
540 mf2[f] = ""
540 mf2[f] = ""
541 for f in removed:
541 for f in removed:
542 if f in mf2:
542 if f in mf2:
543 del mf2[f]
543 del mf2[f]
544 else:
544 else:
545 # we are comparing two revisions
545 # we are comparing two revisions
546 deleted, unknown = [], []
546 deleted, unknown = [], []
547 mf2 = mfmatches(node2)
547 mf2 = mfmatches(node2)
548
548
549 if node1:
549 if node1:
550 # flush lists from dirstate before comparing manifests
550 # flush lists from dirstate before comparing manifests
551 modified, added = [], []
551 modified, added = [], []
552
552
553 for fn in mf2:
553 for fn in mf2:
554 if mf1.has_key(fn):
554 if mf1.has_key(fn):
555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
555 if mf1[fn] != mf2[fn] and (mf2[fn] != "" or fcmp(fn, mf1)):
556 modified.append(fn)
556 modified.append(fn)
557 del mf1[fn]
557 del mf1[fn]
558 else:
558 else:
559 added.append(fn)
559 added.append(fn)
560
560
561 removed = mf1.keys()
561 removed = mf1.keys()
562
562
563 # sort and return results:
563 # sort and return results:
564 for l in modified, added, removed, deleted, unknown:
564 for l in modified, added, removed, deleted, unknown:
565 l.sort()
565 l.sort()
566 return (modified, added, removed, deleted, unknown)
566 return (modified, added, removed, deleted, unknown)
567
567
568 def add(self, list, wlock=None):
568 def add(self, list, wlock=None):
569 if not wlock:
569 if not wlock:
570 wlock = self.wlock()
570 wlock = self.wlock()
571 for f in list:
571 for f in list:
572 p = self.wjoin(f)
572 p = self.wjoin(f)
573 if not os.path.exists(p):
573 if not os.path.exists(p):
574 self.ui.warn(_("%s does not exist!\n") % f)
574 self.ui.warn(_("%s does not exist!\n") % f)
575 elif not os.path.isfile(p):
575 elif not os.path.isfile(p):
576 self.ui.warn(_("%s not added: only files supported currently\n")
576 self.ui.warn(_("%s not added: only files supported currently\n")
577 % f)
577 % f)
578 elif self.dirstate.state(f) in 'an':
578 elif self.dirstate.state(f) in 'an':
579 self.ui.warn(_("%s already tracked!\n") % f)
579 self.ui.warn(_("%s already tracked!\n") % f)
580 else:
580 else:
581 self.dirstate.update([f], "a")
581 self.dirstate.update([f], "a")
582
582
583 def forget(self, list, wlock=None):
583 def forget(self, list, wlock=None):
584 if not wlock:
584 if not wlock:
585 wlock = self.wlock()
585 wlock = self.wlock()
586 for f in list:
586 for f in list:
587 if self.dirstate.state(f) not in 'ai':
587 if self.dirstate.state(f) not in 'ai':
588 self.ui.warn(_("%s not added!\n") % f)
588 self.ui.warn(_("%s not added!\n") % f)
589 else:
589 else:
590 self.dirstate.forget([f])
590 self.dirstate.forget([f])
591
591
592 def remove(self, list, unlink=False, wlock=None):
592 def remove(self, list, unlink=False, wlock=None):
593 if unlink:
593 if unlink:
594 for f in list:
594 for f in list:
595 try:
595 try:
596 util.unlink(self.wjoin(f))
596 util.unlink(self.wjoin(f))
597 except OSError, inst:
597 except OSError, inst:
598 if inst.errno != errno.ENOENT:
598 if inst.errno != errno.ENOENT:
599 raise
599 raise
600 if not wlock:
600 if not wlock:
601 wlock = self.wlock()
601 wlock = self.wlock()
602 for f in list:
602 for f in list:
603 p = self.wjoin(f)
603 p = self.wjoin(f)
604 if os.path.exists(p):
604 if os.path.exists(p):
605 self.ui.warn(_("%s still exists!\n") % f)
605 self.ui.warn(_("%s still exists!\n") % f)
606 elif self.dirstate.state(f) == 'a':
606 elif self.dirstate.state(f) == 'a':
607 self.dirstate.forget([f])
607 self.dirstate.forget([f])
608 elif f not in self.dirstate:
608 elif f not in self.dirstate:
609 self.ui.warn(_("%s not tracked!\n") % f)
609 self.ui.warn(_("%s not tracked!\n") % f)
610 else:
610 else:
611 self.dirstate.update([f], "r")
611 self.dirstate.update([f], "r")
612
612
613 def undelete(self, list, wlock=None):
613 def undelete(self, list, wlock=None):
614 p = self.dirstate.parents()[0]
614 p = self.dirstate.parents()[0]
615 mn = self.changelog.read(p)[0]
615 mn = self.changelog.read(p)[0]
616 mf = self.manifest.readflags(mn)
616 mf = self.manifest.readflags(mn)
617 m = self.manifest.read(mn)
617 m = self.manifest.read(mn)
618 if not wlock:
618 if not wlock:
619 wlock = self.wlock()
619 wlock = self.wlock()
620 for f in list:
620 for f in list:
621 if self.dirstate.state(f) not in "r":
621 if self.dirstate.state(f) not in "r":
622 self.ui.warn("%s not removed!\n" % f)
622 self.ui.warn("%s not removed!\n" % f)
623 else:
623 else:
624 t = self.file(f).read(m[f])
624 t = self.file(f).read(m[f])
625 self.wwrite(f, t)
625 self.wwrite(f, t)
626 util.set_exec(self.wjoin(f), mf[f])
626 util.set_exec(self.wjoin(f), mf[f])
627 self.dirstate.update([f], "n")
627 self.dirstate.update([f], "n")
628
628
629 def copy(self, source, dest, wlock=None):
629 def copy(self, source, dest, wlock=None):
630 p = self.wjoin(dest)
630 p = self.wjoin(dest)
631 if not os.path.exists(p):
631 if not os.path.exists(p):
632 self.ui.warn(_("%s does not exist!\n") % dest)
632 self.ui.warn(_("%s does not exist!\n") % dest)
633 elif not os.path.isfile(p):
633 elif not os.path.isfile(p):
634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
634 self.ui.warn(_("copy failed: %s is not a file\n") % dest)
635 else:
635 else:
636 if not wlock:
636 if not wlock:
637 wlock = self.wlock()
637 wlock = self.wlock()
638 if self.dirstate.state(dest) == '?':
638 if self.dirstate.state(dest) == '?':
639 self.dirstate.update([dest], "a")
639 self.dirstate.update([dest], "a")
640 self.dirstate.copy(source, dest)
640 self.dirstate.copy(source, dest)
641
641
642 def heads(self, start=None):
642 def heads(self, start=None):
643 heads = self.changelog.heads(start)
643 heads = self.changelog.heads(start)
644 # sort the output in rev descending order
644 # sort the output in rev descending order
645 heads = [(-self.changelog.rev(h), h) for h in heads]
645 heads = [(-self.changelog.rev(h), h) for h in heads]
646 heads.sort()
646 heads.sort()
647 return [n for (r, n) in heads]
647 return [n for (r, n) in heads]
648
648
649 # branchlookup returns a dict giving a list of branches for
649 # branchlookup returns a dict giving a list of branches for
650 # each head. A branch is defined as the tag of a node or
650 # each head. A branch is defined as the tag of a node or
651 # the branch of the node's parents. If a node has multiple
651 # the branch of the node's parents. If a node has multiple
652 # branch tags, tags are eliminated if they are visible from other
652 # branch tags, tags are eliminated if they are visible from other
653 # branch tags.
653 # branch tags.
654 #
654 #
655 # So, for this graph: a->b->c->d->e
655 # So, for this graph: a->b->c->d->e
656 # \ /
656 # \ /
657 # aa -----/
657 # aa -----/
658 # a has tag 2.6.12
658 # a has tag 2.6.12
659 # d has tag 2.6.13
659 # d has tag 2.6.13
660 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
660 # e would have branch tags for 2.6.12 and 2.6.13. Because the node
661 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
661 # for 2.6.12 can be reached from the node 2.6.13, that is eliminated
662 # from the list.
662 # from the list.
663 #
663 #
664 # It is possible that more than one head will have the same branch tag.
664 # It is possible that more than one head will have the same branch tag.
665 # callers need to check the result for multiple heads under the same
665 # callers need to check the result for multiple heads under the same
666 # branch tag if that is a problem for them (ie checkout of a specific
666 # branch tag if that is a problem for them (ie checkout of a specific
667 # branch).
667 # branch).
668 #
668 #
669 # passing in a specific branch will limit the depth of the search
669 # passing in a specific branch will limit the depth of the search
670 # through the parents. It won't limit the branches returned in the
670 # through the parents. It won't limit the branches returned in the
671 # result though.
671 # result though.
672 def branchlookup(self, heads=None, branch=None):
672 def branchlookup(self, heads=None, branch=None):
673 if not heads:
673 if not heads:
674 heads = self.heads()
674 heads = self.heads()
675 headt = [ h for h in heads ]
675 headt = [ h for h in heads ]
676 chlog = self.changelog
676 chlog = self.changelog
677 branches = {}
677 branches = {}
678 merges = []
678 merges = []
679 seenmerge = {}
679 seenmerge = {}
680
680
681 # traverse the tree once for each head, recording in the branches
681 # traverse the tree once for each head, recording in the branches
682 # dict which tags are visible from this head. The branches
682 # dict which tags are visible from this head. The branches
683 # dict also records which tags are visible from each tag
683 # dict also records which tags are visible from each tag
684 # while we traverse.
684 # while we traverse.
685 while headt or merges:
685 while headt or merges:
686 if merges:
686 if merges:
687 n, found = merges.pop()
687 n, found = merges.pop()
688 visit = [n]
688 visit = [n]
689 else:
689 else:
690 h = headt.pop()
690 h = headt.pop()
691 visit = [h]
691 visit = [h]
692 found = [h]
692 found = [h]
693 seen = {}
693 seen = {}
694 while visit:
694 while visit:
695 n = visit.pop()
695 n = visit.pop()
696 if n in seen:
696 if n in seen:
697 continue
697 continue
698 pp = chlog.parents(n)
698 pp = chlog.parents(n)
699 tags = self.nodetags(n)
699 tags = self.nodetags(n)
700 if tags:
700 if tags:
701 for x in tags:
701 for x in tags:
702 if x == 'tip':
702 if x == 'tip':
703 continue
703 continue
704 for f in found:
704 for f in found:
705 branches.setdefault(f, {})[n] = 1
705 branches.setdefault(f, {})[n] = 1
706 branches.setdefault(n, {})[n] = 1
706 branches.setdefault(n, {})[n] = 1
707 break
707 break
708 if n not in found:
708 if n not in found:
709 found.append(n)
709 found.append(n)
710 if branch in tags:
710 if branch in tags:
711 continue
711 continue
712 seen[n] = 1
712 seen[n] = 1
713 if pp[1] != nullid and n not in seenmerge:
713 if pp[1] != nullid and n not in seenmerge:
714 merges.append((pp[1], [x for x in found]))
714 merges.append((pp[1], [x for x in found]))
715 seenmerge[n] = 1
715 seenmerge[n] = 1
716 if pp[0] != nullid:
716 if pp[0] != nullid:
717 visit.append(pp[0])
717 visit.append(pp[0])
718 # traverse the branches dict, eliminating branch tags from each
718 # traverse the branches dict, eliminating branch tags from each
719 # head that are visible from another branch tag for that head.
719 # head that are visible from another branch tag for that head.
720 out = {}
720 out = {}
721 viscache = {}
721 viscache = {}
722 for h in heads:
722 for h in heads:
723 def visible(node):
723 def visible(node):
724 if node in viscache:
724 if node in viscache:
725 return viscache[node]
725 return viscache[node]
726 ret = {}
726 ret = {}
727 visit = [node]
727 visit = [node]
728 while visit:
728 while visit:
729 x = visit.pop()
729 x = visit.pop()
730 if x in viscache:
730 if x in viscache:
731 ret.update(viscache[x])
731 ret.update(viscache[x])
732 elif x not in ret:
732 elif x not in ret:
733 ret[x] = 1
733 ret[x] = 1
734 if x in branches:
734 if x in branches:
735 visit[len(visit):] = branches[x].keys()
735 visit[len(visit):] = branches[x].keys()
736 viscache[node] = ret
736 viscache[node] = ret
737 return ret
737 return ret
738 if h not in branches:
738 if h not in branches:
739 continue
739 continue
740 # O(n^2), but somewhat limited. This only searches the
740 # O(n^2), but somewhat limited. This only searches the
741 # tags visible from a specific head, not all the tags in the
741 # tags visible from a specific head, not all the tags in the
742 # whole repo.
742 # whole repo.
743 for b in branches[h]:
743 for b in branches[h]:
744 vis = False
744 vis = False
745 for bb in branches[h].keys():
745 for bb in branches[h].keys():
746 if b != bb:
746 if b != bb:
747 if b in visible(bb):
747 if b in visible(bb):
748 vis = True
748 vis = True
749 break
749 break
750 if not vis:
750 if not vis:
751 l = out.setdefault(h, [])
751 l = out.setdefault(h, [])
752 l[len(l):] = self.nodetags(b)
752 l[len(l):] = self.nodetags(b)
753 return out
753 return out
754
754
755 def branches(self, nodes):
755 def branches(self, nodes):
756 if not nodes:
756 if not nodes:
757 nodes = [self.changelog.tip()]
757 nodes = [self.changelog.tip()]
758 b = []
758 b = []
759 for n in nodes:
759 for n in nodes:
760 t = n
760 t = n
761 while n:
761 while n:
762 p = self.changelog.parents(n)
762 p = self.changelog.parents(n)
763 if p[1] != nullid or p[0] == nullid:
763 if p[1] != nullid or p[0] == nullid:
764 b.append((t, n, p[0], p[1]))
764 b.append((t, n, p[0], p[1]))
765 break
765 break
766 n = p[0]
766 n = p[0]
767 return b
767 return b
768
768
769 def between(self, pairs):
769 def between(self, pairs):
770 r = []
770 r = []
771
771
772 for top, bottom in pairs:
772 for top, bottom in pairs:
773 n, l, i = top, [], 0
773 n, l, i = top, [], 0
774 f = 1
774 f = 1
775
775
776 while n != bottom:
776 while n != bottom:
777 p = self.changelog.parents(n)[0]
777 p = self.changelog.parents(n)[0]
778 if i == f:
778 if i == f:
779 l.append(n)
779 l.append(n)
780 f = f * 2
780 f = f * 2
781 n = p
781 n = p
782 i += 1
782 i += 1
783
783
784 r.append(l)
784 r.append(l)
785
785
786 return r
786 return r
787
787
788 def findincoming(self, remote, base=None, heads=None):
788 def findincoming(self, remote, base=None, heads=None):
789 m = self.changelog.nodemap
789 m = self.changelog.nodemap
790 search = []
790 search = []
791 fetch = {}
791 fetch = {}
792 seen = {}
792 seen = {}
793 seenbranch = {}
793 seenbranch = {}
794 if base == None:
794 if base == None:
795 base = {}
795 base = {}
796
796
797 # assume we're closer to the tip than the root
797 # assume we're closer to the tip than the root
798 # and start by examining the heads
798 # and start by examining the heads
799 self.ui.status(_("searching for changes\n"))
799 self.ui.status(_("searching for changes\n"))
800
800
801 if not heads:
801 if not heads:
802 heads = remote.heads()
802 heads = remote.heads()
803
803
804 unknown = []
804 unknown = []
805 for h in heads:
805 for h in heads:
806 if h not in m:
806 if h not in m:
807 unknown.append(h)
807 unknown.append(h)
808 else:
808 else:
809 base[h] = 1
809 base[h] = 1
810
810
811 if not unknown:
811 if not unknown:
812 return None
812 return None
813
813
814 rep = {}
814 rep = {}
815 reqcnt = 0
815 reqcnt = 0
816
816
817 # search through remote branches
817 # search through remote branches
818 # a 'branch' here is a linear segment of history, with four parts:
818 # a 'branch' here is a linear segment of history, with four parts:
819 # head, root, first parent, second parent
819 # head, root, first parent, second parent
820 # (a branch always has two parents (or none) by definition)
820 # (a branch always has two parents (or none) by definition)
821 unknown = remote.branches(unknown)
821 unknown = remote.branches(unknown)
822 while unknown:
822 while unknown:
823 r = []
823 r = []
824 while unknown:
824 while unknown:
825 n = unknown.pop(0)
825 n = unknown.pop(0)
826 if n[0] in seen:
826 if n[0] in seen:
827 continue
827 continue
828
828
829 self.ui.debug(_("examining %s:%s\n")
829 self.ui.debug(_("examining %s:%s\n")
830 % (short(n[0]), short(n[1])))
830 % (short(n[0]), short(n[1])))
831 if n[0] == nullid:
831 if n[0] == nullid:
832 break
832 break
833 if n in seenbranch:
833 if n in seenbranch:
834 self.ui.debug(_("branch already found\n"))
834 self.ui.debug(_("branch already found\n"))
835 continue
835 continue
836 if n[1] and n[1] in m: # do we know the base?
836 if n[1] and n[1] in m: # do we know the base?
837 self.ui.debug(_("found incomplete branch %s:%s\n")
837 self.ui.debug(_("found incomplete branch %s:%s\n")
838 % (short(n[0]), short(n[1])))
838 % (short(n[0]), short(n[1])))
839 search.append(n) # schedule branch range for scanning
839 search.append(n) # schedule branch range for scanning
840 seenbranch[n] = 1
840 seenbranch[n] = 1
841 else:
841 else:
842 if n[1] not in seen and n[1] not in fetch:
842 if n[1] not in seen and n[1] not in fetch:
843 if n[2] in m and n[3] in m:
843 if n[2] in m and n[3] in m:
844 self.ui.debug(_("found new changeset %s\n") %
844 self.ui.debug(_("found new changeset %s\n") %
845 short(n[1]))
845 short(n[1]))
846 fetch[n[1]] = 1 # earliest unknown
846 fetch[n[1]] = 1 # earliest unknown
847 base[n[2]] = 1 # latest known
847 base[n[2]] = 1 # latest known
848 continue
848 continue
849
849
850 for a in n[2:4]:
850 for a in n[2:4]:
851 if a not in rep:
851 if a not in rep:
852 r.append(a)
852 r.append(a)
853 rep[a] = 1
853 rep[a] = 1
854
854
855 seen[n[0]] = 1
855 seen[n[0]] = 1
856
856
857 if r:
857 if r:
858 reqcnt += 1
858 reqcnt += 1
859 self.ui.debug(_("request %d: %s\n") %
859 self.ui.debug(_("request %d: %s\n") %
860 (reqcnt, " ".join(map(short, r))))
860 (reqcnt, " ".join(map(short, r))))
861 for p in range(0, len(r), 10):
861 for p in range(0, len(r), 10):
862 for b in remote.branches(r[p:p+10]):
862 for b in remote.branches(r[p:p+10]):
863 self.ui.debug(_("received %s:%s\n") %
863 self.ui.debug(_("received %s:%s\n") %
864 (short(b[0]), short(b[1])))
864 (short(b[0]), short(b[1])))
865 if b[0] in m:
865 if b[0] in m:
866 self.ui.debug(_("found base node %s\n")
866 self.ui.debug(_("found base node %s\n")
867 % short(b[0]))
867 % short(b[0]))
868 base[b[0]] = 1
868 base[b[0]] = 1
869 elif b[0] not in seen:
869 elif b[0] not in seen:
870 unknown.append(b)
870 unknown.append(b)
871
871
872 # do binary search on the branches we found
872 # do binary search on the branches we found
873 while search:
873 while search:
874 n = search.pop(0)
874 n = search.pop(0)
875 reqcnt += 1
875 reqcnt += 1
876 l = remote.between([(n[0], n[1])])[0]
876 l = remote.between([(n[0], n[1])])[0]
877 l.append(n[1])
877 l.append(n[1])
878 p = n[0]
878 p = n[0]
879 f = 1
879 f = 1
880 for i in l:
880 for i in l:
881 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
881 self.ui.debug(_("narrowing %d:%d %s\n") % (f, len(l), short(i)))
882 if i in m:
882 if i in m:
883 if f <= 2:
883 if f <= 2:
884 self.ui.debug(_("found new branch changeset %s\n") %
884 self.ui.debug(_("found new branch changeset %s\n") %
885 short(p))
885 short(p))
886 fetch[p] = 1
886 fetch[p] = 1
887 base[i] = 1
887 base[i] = 1
888 else:
888 else:
889 self.ui.debug(_("narrowed branch search to %s:%s\n")
889 self.ui.debug(_("narrowed branch search to %s:%s\n")
890 % (short(p), short(i)))
890 % (short(p), short(i)))
891 search.append((p, i))
891 search.append((p, i))
892 break
892 break
893 p, f = i, f * 2
893 p, f = i, f * 2
894
894
895 # sanity check our fetch list
895 # sanity check our fetch list
896 for f in fetch.keys():
896 for f in fetch.keys():
897 if f in m:
897 if f in m:
898 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
898 raise repo.RepoError(_("already have changeset ") + short(f[:4]))
899
899
900 if base.keys() == [nullid]:
900 if base.keys() == [nullid]:
901 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
901 self.ui.warn(_("warning: pulling from an unrelated repository!\n"))
902
902
903 self.ui.note(_("found new changesets starting at ") +
903 self.ui.note(_("found new changesets starting at ") +
904 " ".join([short(f) for f in fetch]) + "\n")
904 " ".join([short(f) for f in fetch]) + "\n")
905
905
906 self.ui.debug(_("%d total queries\n") % reqcnt)
906 self.ui.debug(_("%d total queries\n") % reqcnt)
907
907
908 return fetch.keys()
908 return fetch.keys()
909
909
910 def findoutgoing(self, remote, base=None, heads=None):
910 def findoutgoing(self, remote, base=None, heads=None):
911 if base == None:
911 if base == None:
912 base = {}
912 base = {}
913 self.findincoming(remote, base, heads)
913 self.findincoming(remote, base, heads)
914
914
915 self.ui.debug(_("common changesets up to ")
915 self.ui.debug(_("common changesets up to ")
916 + " ".join(map(short, base.keys())) + "\n")
916 + " ".join(map(short, base.keys())) + "\n")
917
917
918 remain = dict.fromkeys(self.changelog.nodemap)
918 remain = dict.fromkeys(self.changelog.nodemap)
919
919
920 # prune everything remote has from the tree
920 # prune everything remote has from the tree
921 del remain[nullid]
921 del remain[nullid]
922 remove = base.keys()
922 remove = base.keys()
923 while remove:
923 while remove:
924 n = remove.pop(0)
924 n = remove.pop(0)
925 if n in remain:
925 if n in remain:
926 del remain[n]
926 del remain[n]
927 for p in self.changelog.parents(n):
927 for p in self.changelog.parents(n):
928 remove.append(p)
928 remove.append(p)
929
929
930 # find every node whose parents have been pruned
930 # find every node whose parents have been pruned
931 subset = []
931 subset = []
932 for n in remain:
932 for n in remain:
933 p1, p2 = self.changelog.parents(n)
933 p1, p2 = self.changelog.parents(n)
934 if p1 not in remain and p2 not in remain:
934 if p1 not in remain and p2 not in remain:
935 subset.append(n)
935 subset.append(n)
936
936
937 # this is the set of all roots we have to push
937 # this is the set of all roots we have to push
938 return subset
938 return subset
939
939
940 def pull(self, remote, heads=None):
940 def pull(self, remote, heads=None):
941 l = self.lock()
941 l = self.lock()
942
942
943 # if we have an empty repo, fetch everything
943 # if we have an empty repo, fetch everything
944 if self.changelog.tip() == nullid:
944 if self.changelog.tip() == nullid:
945 self.ui.status(_("requesting all changes\n"))
945 self.ui.status(_("requesting all changes\n"))
946 fetch = [nullid]
946 fetch = [nullid]
947 else:
947 else:
948 fetch = self.findincoming(remote)
948 fetch = self.findincoming(remote)
949
949
950 if not fetch:
950 if not fetch:
951 self.ui.status(_("no changes found\n"))
951 self.ui.status(_("no changes found\n"))
952 return 1
952 return 1
953
953
954 if heads is None:
954 if heads is None:
955 cg = remote.changegroup(fetch, 'pull')
955 cg = remote.changegroup(fetch, 'pull')
956 else:
956 else:
957 cg = remote.changegroupsubset(fetch, heads, 'pull')
957 cg = remote.changegroupsubset(fetch, heads, 'pull')
958 return self.addchangegroup(cg)
958 return self.addchangegroup(cg)
959
959
960 def push(self, remote, force=False, revs=None):
960 def push(self, remote, force=False, revs=None):
961 lock = remote.lock()
961 lock = remote.lock()
962
962
963 base = {}
963 base = {}
964 heads = remote.heads()
964 heads = remote.heads()
965 inc = self.findincoming(remote, base, heads)
965 inc = self.findincoming(remote, base, heads)
966 if not force and inc:
966 if not force and inc:
967 self.ui.warn(_("abort: unsynced remote changes!\n"))
967 self.ui.warn(_("abort: unsynced remote changes!\n"))
968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
968 self.ui.status(_("(did you forget to sync? use push -f to force)\n"))
969 return 1
969 return 1
970
970
971 update = self.findoutgoing(remote, base)
971 update = self.findoutgoing(remote, base)
972 if revs is not None:
972 if revs is not None:
973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
973 msng_cl, bases, heads = self.changelog.nodesbetween(update, revs)
974 else:
974 else:
975 bases, heads = update, self.changelog.heads()
975 bases, heads = update, self.changelog.heads()
976
976
977 if not bases:
977 if not bases:
978 self.ui.status(_("no changes found\n"))
978 self.ui.status(_("no changes found\n"))
979 return 1
979 return 1
980 elif not force:
980 elif not force:
981 if len(bases) < len(heads):
981 if len(bases) < len(heads):
982 self.ui.warn(_("abort: push creates new remote branches!\n"))
982 self.ui.warn(_("abort: push creates new remote branches!\n"))
983 self.ui.status(_("(did you forget to merge?"
983 self.ui.status(_("(did you forget to merge?"
984 " use push -f to force)\n"))
984 " use push -f to force)\n"))
985 return 1
985 return 1
986
986
987 if revs is None:
987 if revs is None:
988 cg = self.changegroup(update, 'push')
988 cg = self.changegroup(update, 'push')
989 else:
989 else:
990 cg = self.changegroupsubset(update, revs, 'push')
990 cg = self.changegroupsubset(update, revs, 'push')
991 return remote.addchangegroup(cg)
991 return remote.addchangegroup(cg)
992
992
    def changegroupsubset(self, bases, heads, source):
        """This function generates a changegroup consisting of all the nodes
        that are descendents of any of the bases, and ancestors of any of
        the heads.

        bases  - list of changelog nodes assumed present on the recipient
        heads  - list of changelog nodes the group should reach up to
        source - tag passed to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer wrapping a generator that streams the
        encoded changegroup chunks.

        It is fairly complex as determining which filenodes and which
        manifest nodes need to be included for the changeset to be complete
        is non-trivial.

        Another wrinkle is doing the reverse, figuring out which changeset in
        the changegroup a particular filenode or manifestnode belongs to."""

        self.hook('preoutgoing', throw=True, source=source)

        # Set up some initial variables
        # Make it easy to refer to self.changelog
        cl = self.changelog
        # msng is short for missing - compute the list of changesets in this
        # changegroup.
        msng_cl_lst, bases, heads = cl.nodesbetween(bases, heads)
        # Some bases may turn out to be superfluous, and some heads may be
        # too.  nodesbetween will return the minimal set of bases and heads
        # necessary to re-create the changegroup.

        # Known heads are the list of heads that it is assumed the recipient
        # of this changegroup will know about.
        knownheads = {}
        # We assume that all parents of bases are known heads.
        for n in bases:
            for p in cl.parents(n):
                if p != nullid:
                    knownheads[p] = 1
        knownheads = knownheads.keys()
        if knownheads:
            # Now that we know what heads are known, we can compute which
            # changesets are known.  The recipient must know about all
            # changesets required to reach the known heads from the null
            # changeset.
            has_cl_set, junk, junk = cl.nodesbetween(None, knownheads)
            junk = None
            # Transform the list into an ersatz set.
            has_cl_set = dict.fromkeys(has_cl_set)
        else:
            # If there were no known heads, the recipient cannot be assumed
            # to know about any changesets.
            has_cl_set = {}

        # Make it easy to refer to self.manifest
        mnfst = self.manifest
        # We don't know which manifests are missing yet
        msng_mnfst_set = {}
        # Nor do we know which filenodes are missing.
        msng_filenode_set = {}

        # Forcing a full index read here works around a lazyindex bug;
        # the value itself is discarded.
        junk = mnfst.index[mnfst.count() - 1] # Get around a bug in lazyindex
        junk = None

        # A changeset always belongs to itself, so the changenode lookup
        # function for a changenode is identity.
        def identity(x):
            return x

        # A function generating function.  Sets up an environment for the
        # inner function.
        def cmp_by_rev_func(revlog):
            # Compare two nodes by their revision number in the environment's
            # revision history. Since the revision number both represents the
            # most efficient order to read the nodes in, and represents a
            # topological sorting of the nodes, this function is often useful.
            def cmp_by_rev(a, b):
                return cmp(revlog.rev(a), revlog.rev(b))
            return cmp_by_rev

        # If we determine that a particular file or manifest node must be a
        # node that the recipient of the changegroup will already have, we can
        # also assume the recipient will have all the parents.  This function
        # prunes them from the set of missing nodes.
        def prune_parents(revlog, hasset, msngset):
            haslst = hasset.keys()
            haslst.sort(cmp_by_rev_func(revlog))
            for node in haslst:
                # Walk all ancestors of each known node, marking them known
                # too (iterative DFS via an explicit stack).
                parentlst = [p for p in revlog.parents(node) if p != nullid]
                while parentlst:
                    n = parentlst.pop()
                    if n not in hasset:
                        hasset[n] = 1
                        p = [p for p in revlog.parents(n) if p != nullid]
                        parentlst.extend(p)
            for n in hasset:
                msngset.pop(n, None)

        # This is a function generating function used to set up an environment
        # for the inner function to execute in.
        def manifest_and_file_collector(changedfileset):
            # This is an information gathering function that gathers
            # information from each changeset node that goes out as part of
            # the changegroup.  The information gathered is a list of which
            # manifest nodes are potentially required (the recipient may
            # already have them) and total list of all files which were
            # changed in any changeset in the changegroup.
            #
            # We also remember the first changenode we saw any manifest
            # referenced by so we can later determine which changenode 'owns'
            # the manifest.
            def collect_manifests_and_files(clnode):
                c = cl.read(clnode)
                for f in c[3]:
                    # This is to make sure we only have one instance of each
                    # filename string for each filename.
                    changedfileset.setdefault(f, f)
                msng_mnfst_set.setdefault(c[0], clnode)
            return collect_manifests_and_files

        # Figure out which manifest nodes (of the ones we think might be part
        # of the changegroup) the recipient must know about and remove them
        # from the changegroup.
        def prune_manifests():
            has_mnfst_set = {}
            for n in msng_mnfst_set:
                # If a 'missing' manifest thinks it belongs to a changenode
                # the recipient is assumed to have, obviously the recipient
                # must have that manifest.
                linknode = cl.node(mnfst.linkrev(n))
                if linknode in has_cl_set:
                    has_mnfst_set[n] = 1
            prune_parents(mnfst, has_mnfst_set, msng_mnfst_set)

        # Use the information collected in collect_manifests_and_files to say
        # which changenode any manifestnode belongs to.
        def lookup_manifest_link(mnfstnode):
            return msng_mnfst_set[mnfstnode]

        # A function generating function that sets up the initial environment
        # the inner function.
        def filenode_collector(changedfiles):
            # next_rev is a one-element list so the closure can mutate it
            # (no 'nonlocal' in this Python version).
            next_rev = [0]
            # This gathers information from each manifestnode included in the
            # changegroup about which filenodes the manifest node references
            # so we can include those in the changegroup too.
            #
            # It also remembers which changenode each filenode belongs to.  It
            # does this by assuming the a filenode belongs to the changenode
            # the first manifest that references it belongs to.
            def collect_msng_filenodes(mnfstnode):
                r = mnfst.rev(mnfstnode)
                if r == next_rev[0]:
                    # If the last rev we looked at was the one just previous,
                    # we only need to see a diff.
                    delta = mdiff.patchtext(mnfst.delta(mnfstnode))
                    # For each line in the delta
                    for dline in delta.splitlines():
                        # get the filename and filenode for that line
                        f, fnode = dline.split('\0')
                        fnode = bin(fnode[:40])
                        f = changedfiles.get(f, None)
                        # And if the file is in the list of files we care
                        # about.
                        if f is not None:
                            # Get the changenode this manifest belongs to
                            clnode = msng_mnfst_set[mnfstnode]
                            # Create the set of filenodes for the file if
                            # there isn't one already.
                            ndset = msng_filenode_set.setdefault(f, {})
                            # And set the filenode's changelog node to the
                            # manifest's if it hasn't been set already.
                            ndset.setdefault(fnode, clnode)
                else:
                    # Otherwise we need a full manifest.
                    m = mnfst.read(mnfstnode)
                    # For every file in we care about.
                    for f in changedfiles:
                        fnode = m.get(f, None)
                        # If it's in the manifest
                        if fnode is not None:
                            # See comments above.
                            clnode = msng_mnfst_set[mnfstnode]
                            ndset = msng_filenode_set.setdefault(f, {})
                            ndset.setdefault(fnode, clnode)
                # Remember the revision we hope to see next.
                next_rev[0] = r + 1
            return collect_msng_filenodes

        # We have a list of filenodes we think we need for a file, lets remove
        # all those we now the recipient must have.
        def prune_filenodes(f, filerevlog):
            msngset = msng_filenode_set[f]
            hasset = {}
            # If a 'missing' filenode thinks it belongs to a changenode we
            # assume the recipient must have, then the recipient must have
            # that filenode.
            for n in msngset:
                clnode = cl.node(filerevlog.linkrev(n))
                if clnode in has_cl_set:
                    hasset[n] = 1
            prune_parents(filerevlog, hasset, msngset)

        # A function generator function that sets up the a context for the
        # inner function.
        def lookup_filenode_link_func(fname):
            msngset = msng_filenode_set[fname]
            # Lookup the changenode the filenode belongs to.
            def lookup_filenode_link(fnode):
                return msngset[fnode]
            return lookup_filenode_link

        # Now that we have all theses utility functions to help out and
        # logically divide up the task, generate the group.
        def gengroup():
            # The set of changed files starts empty.
            changedfiles = {}
            # Create a changenode group generator that will call our functions
            # back to lookup the owning changenode and collect information.
            group = cl.group(msng_cl_lst, identity,
                             manifest_and_file_collector(changedfiles))
            for chnk in group:
                yield chnk

            # The list of manifests has been collected by the generator
            # calling our functions back.
            prune_manifests()
            msng_mnfst_lst = msng_mnfst_set.keys()
            # Sort the manifestnodes by revision number.
            msng_mnfst_lst.sort(cmp_by_rev_func(mnfst))
            # Create a generator for the manifestnodes that calls our lookup
            # and data collection functions back.
            group = mnfst.group(msng_mnfst_lst, lookup_manifest_link,
                                filenode_collector(changedfiles))
            for chnk in group:
                yield chnk

            # These are no longer needed, dereference and toss the memory for
            # them.
            msng_mnfst_lst = None
            msng_mnfst_set.clear()

            changedfiles = changedfiles.keys()
            changedfiles.sort()
            # Go through all our files in order sorted by name.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                # Toss out the filenodes that the recipient isn't really
                # missing.
                if msng_filenode_set.has_key(fname):
                    prune_filenodes(fname, filerevlog)
                    msng_filenode_lst = msng_filenode_set[fname].keys()
                else:
                    msng_filenode_lst = []
                # If any filenodes are left, generate the group for them,
                # otherwise don't bother.
                if len(msng_filenode_lst) > 0:
                    # Each file section starts with a length-prefixed
                    # filename chunk (big-endian 4-byte length + 4).
                    yield struct.pack(">l", len(fname) + 4) + fname
                    # Sort the filenodes by their revision #
                    msng_filenode_lst.sort(cmp_by_rev_func(filerevlog))
                    # Create a group generator and only pass in a changenode
                    # lookup function as we need to collect no information
                    # from filenodes.
                    group = filerevlog.group(msng_filenode_lst,
                                             lookup_filenode_link_func(fname))
                    for chnk in group:
                        yield chnk
                if msng_filenode_set.has_key(fname):
                    # Don't need this anymore, toss it to free memory.
                    del msng_filenode_set[fname]
            # Signal that no more groups are left.
            yield struct.pack(">l", 0)

        self.hook('outgoing', node=hex(msng_cl_lst[0]), source=source)

        return util.chunkbuffer(gengroup())
1262
1262
    def changegroup(self, basenodes, source):
        """Generate a changegroup of all nodes that we have that a recipient
        doesn't.

        basenodes - changelog nodes assumed present on the recipient
        source    - tag passed to the 'preoutgoing'/'outgoing' hooks

        Returns a util.chunkbuffer wrapping a generator that streams the
        encoded changegroup chunks.

        This is much easier than the previous function as we can assume that
        the recipient has any changenode we aren't sending them."""

        self.hook('preoutgoing', throw=True, source=source)

        cl = self.changelog
        # All changesets descending from basenodes go into the group.
        nodes = cl.nodesbetween(basenodes, None)[0]
        # Ersatz set of the changelog revision numbers being sent; used to
        # filter manifest/file revlog entries by their linkrev.
        revset = dict.fromkeys([cl.rev(n) for n in nodes])

        # Changesets own themselves, so their lookup function is identity.
        def identity(x):
            return x

        # Yield the nodes of a revlog whose linked changeset is in revset,
        # in revision (storage) order.
        def gennodelst(revlog):
            for r in xrange(0, revlog.count()):
                n = revlog.node(r)
                if revlog.linkrev(n) in revset:
                    yield n

        # Closure factory: records every file touched by each outgoing
        # changeset into changedfileset as a side effect of group().
        def changed_file_collector(changedfileset):
            def collect_changed_files(clnode):
                c = cl.read(clnode)
                for fname in c[3]:
                    changedfileset[fname] = 1
            return collect_changed_files

        # Closure factory: map a revlog node to its owning changelog node
        # via the linkrev.
        def lookuprevlink_func(revlog):
            def lookuprevlink(n):
                return cl.node(revlog.linkrev(n))
            return lookuprevlink

        def gengroup():
            # construct a list of all changed files
            changedfiles = {}

            # Changelog section first; collecting changed files on the way.
            for chnk in cl.group(nodes, identity,
                                 changed_file_collector(changedfiles)):
                yield chnk
            changedfiles = changedfiles.keys()
            changedfiles.sort()

            # Then the manifest section.
            mnfst = self.manifest
            nodeiter = gennodelst(mnfst)
            for chnk in mnfst.group(nodeiter, lookuprevlink_func(mnfst)):
                yield chnk

            # Finally one section per changed file, each prefixed with a
            # length-prefixed filename chunk; empty files are skipped.
            for fname in changedfiles:
                filerevlog = self.file(fname)
                nodeiter = gennodelst(filerevlog)
                nodeiter = list(nodeiter)
                if nodeiter:
                    yield struct.pack(">l", len(fname) + 4) + fname
                    lookup = lookuprevlink_func(filerevlog)
                    for chnk in filerevlog.group(nodeiter, lookup):
                        yield chnk

            # Zero-length chunk terminates the stream.
            yield struct.pack(">l", 0)
            self.hook('outgoing', node=hex(nodes[0]), source=source)

        return util.chunkbuffer(gengroup())
1326
1326
    def addchangegroup(self, source):
        """Add a changegroup read from a file-like object to the repository.

        source - file-like object yielding the chunked changegroup stream
                 (length-prefixed chunks as produced by changegroup()).

        Adds changesets, manifests and file revisions inside a single
        transaction, firing the 'prechangegroup', 'pretxnchangegroup',
        'changegroup' and 'incoming' hooks along the way.
        """

        # Read one length-prefixed chunk: 4-byte big-endian length
        # (including itself), then payload. "" signals end of a group.
        def getchunk():
            d = source.read(4)
            if not d:
                return ""
            l = struct.unpack(">l", d)[0]
            if l <= 4:
                return ""
            d = source.read(l - 4)
            if len(d) < l - 4:
                raise repo.RepoError(_("premature EOF reading chunk"
                                       " (got %d bytes, expected %d)")
                                     % (len(d), l - 4))
            return d

        # Yield chunks until the empty terminator chunk.
        def getgroup():
            while 1:
                c = getchunk()
                if not c:
                    break
                yield c

        # linkrev mapper for incoming changesets: each new changeset links
        # to the next changelog revision to be appended.
        def csmap(x):
            self.ui.debug(_("add changeset %s\n") % short(x))
            return self.changelog.count()

        # linkrev mapper for manifests/files: link to the already-added
        # changeset's revision number.
        def revmap(x):
            return self.changelog.rev(x)

        if not source:
            return

        self.hook('prechangegroup', throw=True)

        changesets = files = revisions = 0

        tr = self.transaction()

        # Remember head count to report "(+N heads)" afterwards.
        oldheads = len(self.changelog.heads())

        # pull off the changeset group
        self.ui.status(_("adding changesets\n"))
        co = self.changelog.tip()
        cn = self.changelog.addgroup(getgroup(), csmap, tr, 1) # unique
        cnr, cor = map(self.changelog.rev, (cn, co))
        if cn == nullid:
            # Nothing was added; treat new tip rev as the old one.
            cnr = cor
        changesets = cnr - cor

        # pull off the manifest group
        self.ui.status(_("adding manifests\n"))
        mm = self.manifest.tip()
        mo = self.manifest.addgroup(getgroup(), revmap, tr)

        # process the files
        self.ui.status(_("adding file changes\n"))
        while 1:
            # Each file section starts with a filename chunk; an empty
            # chunk ends the stream.
            f = getchunk()
            if not f:
                break
            self.ui.debug(_("adding %s revisions\n") % f)
            fl = self.file(f)
            o = fl.count()
            n = fl.addgroup(getgroup(), revmap, tr)
            revisions += fl.count() - o
            files += 1

        newheads = len(self.changelog.heads())
        heads = ""
        if oldheads and newheads > oldheads:
            heads = _(" (+%d heads)") % (newheads - oldheads)

        self.ui.status(_("added %d changesets"
                         " with %d changes to %d files%s\n")
                       % (changesets, revisions, files, heads))

        # Runs inside the transaction so a hook failure rolls it back.
        self.hook('pretxnchangegroup', throw=True,
                  node=hex(self.changelog.node(cor+1)))

        tr.close()

        if changesets > 0:
            self.hook("changegroup", node=hex(self.changelog.node(cor+1)))

            # NOTE(review): the 'incoming' loop is presumed nested under
            # 'changesets > 0' (it indexes cor+1..cnr which is empty
            # otherwise) — confirm against upstream history.
            for i in range(cor + 1, cnr + 1):
                self.hook("incoming", node=hex(self.changelog.node(i)))
1414
1415 def update(self, node, allow=False, force=False, choose=None,
1415 def update(self, node, allow=False, force=False, choose=None,
1416 moddirstate=True, forcemerge=False, wlock=None):
1416 moddirstate=True, forcemerge=False, wlock=None):
1417 pl = self.dirstate.parents()
1417 pl = self.dirstate.parents()
1418 if not force and pl[1] != nullid:
1418 if not force and pl[1] != nullid:
1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1419 self.ui.warn(_("aborting: outstanding uncommitted merges\n"))
1420 return 1
1420 return 1
1421
1421
1422 err = False
1422 err = False
1423
1423
1424 p1, p2 = pl[0], node
1424 p1, p2 = pl[0], node
1425 pa = self.changelog.ancestor(p1, p2)
1425 pa = self.changelog.ancestor(p1, p2)
1426 m1n = self.changelog.read(p1)[0]
1426 m1n = self.changelog.read(p1)[0]
1427 m2n = self.changelog.read(p2)[0]
1427 m2n = self.changelog.read(p2)[0]
1428 man = self.manifest.ancestor(m1n, m2n)
1428 man = self.manifest.ancestor(m1n, m2n)
1429 m1 = self.manifest.read(m1n)
1429 m1 = self.manifest.read(m1n)
1430 mf1 = self.manifest.readflags(m1n)
1430 mf1 = self.manifest.readflags(m1n)
1431 m2 = self.manifest.read(m2n).copy()
1431 m2 = self.manifest.read(m2n).copy()
1432 mf2 = self.manifest.readflags(m2n)
1432 mf2 = self.manifest.readflags(m2n)
1433 ma = self.manifest.read(man)
1433 ma = self.manifest.read(man)
1434 mfa = self.manifest.readflags(man)
1434 mfa = self.manifest.readflags(man)
1435
1435
1436 modified, added, removed, deleted, unknown = self.changes()
1436 modified, added, removed, deleted, unknown = self.changes()
1437
1437
1438 # is this a jump, or a merge? i.e. is there a linear path
1438 # is this a jump, or a merge? i.e. is there a linear path
1439 # from p1 to p2?
1439 # from p1 to p2?
1440 linear_path = (pa == p1 or pa == p2)
1440 linear_path = (pa == p1 or pa == p2)
1441
1441
1442 if allow and linear_path:
1442 if allow and linear_path:
1443 raise util.Abort(_("there is nothing to merge, "
1443 raise util.Abort(_("there is nothing to merge, "
1444 "just use 'hg update'"))
1444 "just use 'hg update'"))
1445 if allow and not forcemerge:
1445 if allow and not forcemerge:
1446 if modified or added or removed:
1446 if modified or added or removed:
1447 raise util.Abort(_("outstanding uncommited changes"))
1447 raise util.Abort(_("outstanding uncommited changes"))
1448 if not forcemerge and not force:
1448 if not forcemerge and not force:
1449 for f in unknown:
1449 for f in unknown:
1450 if f in m2:
1450 if f in m2:
1451 t1 = self.wread(f)
1451 t1 = self.wread(f)
1452 t2 = self.file(f).read(m2[f])
1452 t2 = self.file(f).read(m2[f])
1453 if cmp(t1, t2) != 0:
1453 if cmp(t1, t2) != 0:
1454 raise util.Abort(_("'%s' already exists in the working"
1454 raise util.Abort(_("'%s' already exists in the working"
1455 " dir and differs from remote") % f)
1455 " dir and differs from remote") % f)
1456
1456
1457 # resolve the manifest to determine which files
1457 # resolve the manifest to determine which files
1458 # we care about merging
1458 # we care about merging
1459 self.ui.note(_("resolving manifests\n"))
1459 self.ui.note(_("resolving manifests\n"))
1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1460 self.ui.debug(_(" force %s allow %s moddirstate %s linear %s\n") %
1461 (force, allow, moddirstate, linear_path))
1461 (force, allow, moddirstate, linear_path))
1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1462 self.ui.debug(_(" ancestor %s local %s remote %s\n") %
1463 (short(man), short(m1n), short(m2n)))
1463 (short(man), short(m1n), short(m2n)))
1464
1464
1465 merge = {}
1465 merge = {}
1466 get = {}
1466 get = {}
1467 remove = []
1467 remove = []
1468
1468
1469 # construct a working dir manifest
1469 # construct a working dir manifest
1470 mw = m1.copy()
1470 mw = m1.copy()
1471 mfw = mf1.copy()
1471 mfw = mf1.copy()
1472 umap = dict.fromkeys(unknown)
1472 umap = dict.fromkeys(unknown)
1473
1473
1474 for f in added + modified + unknown:
1474 for f in added + modified + unknown:
1475 mw[f] = ""
1475 mw[f] = ""
1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1476 mfw[f] = util.is_exec(self.wjoin(f), mfw.get(f, False))
1477
1477
1478 if moddirstate and not wlock:
1478 if moddirstate and not wlock:
1479 wlock = self.wlock()
1479 wlock = self.wlock()
1480
1480
1481 for f in deleted + removed:
1481 for f in deleted + removed:
1482 if f in mw:
1482 if f in mw:
1483 del mw[f]
1483 del mw[f]
1484
1484
1485 # If we're jumping between revisions (as opposed to merging),
1485 # If we're jumping between revisions (as opposed to merging),
1486 # and if neither the working directory nor the target rev has
1486 # and if neither the working directory nor the target rev has
1487 # the file, then we need to remove it from the dirstate, to
1487 # the file, then we need to remove it from the dirstate, to
1488 # prevent the dirstate from listing the file when it is no
1488 # prevent the dirstate from listing the file when it is no
1489 # longer in the manifest.
1489 # longer in the manifest.
1490 if moddirstate and linear_path and f not in m2:
1490 if moddirstate and linear_path and f not in m2:
1491 self.dirstate.forget((f,))
1491 self.dirstate.forget((f,))
1492
1492
1493 # Compare manifests
1493 # Compare manifests
1494 for f, n in mw.iteritems():
1494 for f, n in mw.iteritems():
1495 if choose and not choose(f):
1495 if choose and not choose(f):
1496 continue
1496 continue
1497 if f in m2:
1497 if f in m2:
1498 s = 0
1498 s = 0
1499
1499
1500 # is the wfile new since m1, and match m2?
1500 # is the wfile new since m1, and match m2?
1501 if f not in m1:
1501 if f not in m1:
1502 t1 = self.wread(f)
1502 t1 = self.wread(f)
1503 t2 = self.file(f).read(m2[f])
1503 t2 = self.file(f).read(m2[f])
1504 if cmp(t1, t2) == 0:
1504 if cmp(t1, t2) == 0:
1505 n = m2[f]
1505 n = m2[f]
1506 del t1, t2
1506 del t1, t2
1507
1507
1508 # are files different?
1508 # are files different?
1509 if n != m2[f]:
1509 if n != m2[f]:
1510 a = ma.get(f, nullid)
1510 a = ma.get(f, nullid)
1511 # are both different from the ancestor?
1511 # are both different from the ancestor?
1512 if n != a and m2[f] != a:
1512 if n != a and m2[f] != a:
1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1513 self.ui.debug(_(" %s versions differ, resolve\n") % f)
1514 # merge executable bits
1514 # merge executable bits
1515 # "if we changed or they changed, change in merge"
1515 # "if we changed or they changed, change in merge"
1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1516 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1517 mode = ((a^b) | (a^c)) ^ a
1517 mode = ((a^b) | (a^c)) ^ a
1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1518 merge[f] = (m1.get(f, nullid), m2[f], mode)
1519 s = 1
1519 s = 1
1520 # are we clobbering?
1520 # are we clobbering?
1521 # is remote's version newer?
1521 # is remote's version newer?
1522 # or are we going back in time?
1522 # or are we going back in time?
1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1523 elif force or m2[f] != a or (p2 == pa and mw[f] == m1[f]):
1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1524 self.ui.debug(_(" remote %s is newer, get\n") % f)
1525 get[f] = m2[f]
1525 get[f] = m2[f]
1526 s = 1
1526 s = 1
1527 elif f in umap:
1527 elif f in umap:
1528 # this unknown file is the same as the checkout
1528 # this unknown file is the same as the checkout
1529 get[f] = m2[f]
1529 get[f] = m2[f]
1530
1530
1531 if not s and mfw[f] != mf2[f]:
1531 if not s and mfw[f] != mf2[f]:
1532 if force:
1532 if force:
1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1533 self.ui.debug(_(" updating permissions for %s\n") % f)
1534 util.set_exec(self.wjoin(f), mf2[f])
1534 util.set_exec(self.wjoin(f), mf2[f])
1535 else:
1535 else:
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1536 a, b, c = mfa.get(f, 0), mfw[f], mf2[f]
1537 mode = ((a^b) | (a^c)) ^ a
1537 mode = ((a^b) | (a^c)) ^ a
1538 if mode != b:
1538 if mode != b:
1539 self.ui.debug(_(" updating permissions for %s\n")
1539 self.ui.debug(_(" updating permissions for %s\n")
1540 % f)
1540 % f)
1541 util.set_exec(self.wjoin(f), mode)
1541 util.set_exec(self.wjoin(f), mode)
1542 del m2[f]
1542 del m2[f]
1543 elif f in ma:
1543 elif f in ma:
1544 if n != ma[f]:
1544 if n != ma[f]:
1545 r = _("d")
1545 r = _("d")
1546 if not force and (linear_path or allow):
1546 if not force and (linear_path or allow):
1547 r = self.ui.prompt(
1547 r = self.ui.prompt(
1548 (_(" local changed %s which remote deleted\n") % f) +
1548 (_(" local changed %s which remote deleted\n") % f) +
1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1549 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1550 if r == _("d"):
1550 if r == _("d"):
1551 remove.append(f)
1551 remove.append(f)
1552 else:
1552 else:
1553 self.ui.debug(_("other deleted %s\n") % f)
1553 self.ui.debug(_("other deleted %s\n") % f)
1554 remove.append(f) # other deleted it
1554 remove.append(f) # other deleted it
1555 else:
1555 else:
1556 # file is created on branch or in working directory
1556 # file is created on branch or in working directory
1557 if force and f not in umap:
1557 if force and f not in umap:
1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1558 self.ui.debug(_("remote deleted %s, clobbering\n") % f)
1559 remove.append(f)
1559 remove.append(f)
1560 elif n == m1.get(f, nullid): # same as parent
1560 elif n == m1.get(f, nullid): # same as parent
1561 if p2 == pa: # going backwards?
1561 if p2 == pa: # going backwards?
1562 self.ui.debug(_("remote deleted %s\n") % f)
1562 self.ui.debug(_("remote deleted %s\n") % f)
1563 remove.append(f)
1563 remove.append(f)
1564 else:
1564 else:
1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1565 self.ui.debug(_("local modified %s, keeping\n") % f)
1566 else:
1566 else:
1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1567 self.ui.debug(_("working dir created %s, keeping\n") % f)
1568
1568
1569 for f, n in m2.iteritems():
1569 for f, n in m2.iteritems():
1570 if choose and not choose(f):
1570 if choose and not choose(f):
1571 continue
1571 continue
1572 if f[0] == "/":
1572 if f[0] == "/":
1573 continue
1573 continue
1574 if f in ma and n != ma[f]:
1574 if f in ma and n != ma[f]:
1575 r = _("k")
1575 r = _("k")
1576 if not force and (linear_path or allow):
1576 if not force and (linear_path or allow):
1577 r = self.ui.prompt(
1577 r = self.ui.prompt(
1578 (_("remote changed %s which local deleted\n") % f) +
1578 (_("remote changed %s which local deleted\n") % f) +
1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1579 _("(k)eep or (d)elete?"), _("[kd]"), _("k"))
1580 if r == _("k"):
1580 if r == _("k"):
1581 get[f] = n
1581 get[f] = n
1582 elif f not in ma:
1582 elif f not in ma:
1583 self.ui.debug(_("remote created %s\n") % f)
1583 self.ui.debug(_("remote created %s\n") % f)
1584 get[f] = n
1584 get[f] = n
1585 else:
1585 else:
1586 if force or p2 == pa: # going backwards?
1586 if force or p2 == pa: # going backwards?
1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1587 self.ui.debug(_("local deleted %s, recreating\n") % f)
1588 get[f] = n
1588 get[f] = n
1589 else:
1589 else:
1590 self.ui.debug(_("local deleted %s\n") % f)
1590 self.ui.debug(_("local deleted %s\n") % f)
1591
1591
1592 del mw, m1, m2, ma
1592 del mw, m1, m2, ma
1593
1593
1594 if force:
1594 if force:
1595 for f in merge:
1595 for f in merge:
1596 get[f] = merge[f][1]
1596 get[f] = merge[f][1]
1597 merge = {}
1597 merge = {}
1598
1598
1599 if linear_path or force:
1599 if linear_path or force:
1600 # we don't need to do any magic, just jump to the new rev
1600 # we don't need to do any magic, just jump to the new rev
1601 branch_merge = False
1601 branch_merge = False
1602 p1, p2 = p2, nullid
1602 p1, p2 = p2, nullid
1603 else:
1603 else:
1604 if not allow:
1604 if not allow:
1605 self.ui.status(_("this update spans a branch"
1605 self.ui.status(_("this update spans a branch"
1606 " affecting the following files:\n"))
1606 " affecting the following files:\n"))
1607 fl = merge.keys() + get.keys()
1607 fl = merge.keys() + get.keys()
1608 fl.sort()
1608 fl.sort()
1609 for f in fl:
1609 for f in fl:
1610 cf = ""
1610 cf = ""
1611 if f in merge:
1611 if f in merge:
1612 cf = _(" (resolve)")
1612 cf = _(" (resolve)")
1613 self.ui.status(" %s%s\n" % (f, cf))
1613 self.ui.status(" %s%s\n" % (f, cf))
1614 self.ui.warn(_("aborting update spanning branches!\n"))
1614 self.ui.warn(_("aborting update spanning branches!\n"))
1615 self.ui.status(_("(use update -m to merge across branches"
1615 self.ui.status(_("(use update -m to merge across branches"
1616 " or -C to lose changes)\n"))
1616 " or -C to lose changes)\n"))
1617 return 1
1617 return 1
1618 branch_merge = True
1618 branch_merge = True
1619
1619
1620 # get the files we don't need to change
1620 # get the files we don't need to change
1621 files = get.keys()
1621 files = get.keys()
1622 files.sort()
1622 files.sort()
1623 for f in files:
1623 for f in files:
1624 if f[0] == "/":
1624 if f[0] == "/":
1625 continue
1625 continue
1626 self.ui.note(_("getting %s\n") % f)
1626 self.ui.note(_("getting %s\n") % f)
1627 t = self.file(f).read(get[f])
1627 t = self.file(f).read(get[f])
1628 self.wwrite(f, t)
1628 self.wwrite(f, t)
1629 util.set_exec(self.wjoin(f), mf2[f])
1629 util.set_exec(self.wjoin(f), mf2[f])
1630 if moddirstate:
1630 if moddirstate:
1631 if branch_merge:
1631 if branch_merge:
1632 self.dirstate.update([f], 'n', st_mtime=-1)
1632 self.dirstate.update([f], 'n', st_mtime=-1)
1633 else:
1633 else:
1634 self.dirstate.update([f], 'n')
1634 self.dirstate.update([f], 'n')
1635
1635
1636 # merge the tricky bits
1636 # merge the tricky bits
1637 files = merge.keys()
1637 files = merge.keys()
1638 files.sort()
1638 files.sort()
1639 xp1 = hex(p1)
1640 xp2 = hex(p2)
1639 for f in files:
1641 for f in files:
1640 self.ui.status(_("merging %s\n") % f)
1642 self.ui.status(_("merging %s\n") % f)
1641 my, other, flag = merge[f]
1643 my, other, flag = merge[f]
1642 ret = self.merge3(f, my, other)
1644 ret = self.merge3(f, my, other, xp1, xp2)
1643 if ret:
1645 if ret:
1644 err = True
1646 err = True
1645 util.set_exec(self.wjoin(f), flag)
1647 util.set_exec(self.wjoin(f), flag)
1646 if moddirstate:
1648 if moddirstate:
1647 if branch_merge:
1649 if branch_merge:
1648 # We've done a branch merge, mark this file as merged
1650 # We've done a branch merge, mark this file as merged
1649 # so that we properly record the merger later
1651 # so that we properly record the merger later
1650 self.dirstate.update([f], 'm')
1652 self.dirstate.update([f], 'm')
1651 else:
1653 else:
1652 # We've update-merged a locally modified file, so
1654 # We've update-merged a locally modified file, so
1653 # we set the dirstate to emulate a normal checkout
1655 # we set the dirstate to emulate a normal checkout
1654 # of that file some time in the past. Thus our
1656 # of that file some time in the past. Thus our
1655 # merge will appear as a normal local file
1657 # merge will appear as a normal local file
1656 # modification.
1658 # modification.
1657 f_len = len(self.file(f).read(other))
1659 f_len = len(self.file(f).read(other))
1658 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1660 self.dirstate.update([f], 'n', st_size=f_len, st_mtime=-1)
1659
1661
1660 remove.sort()
1662 remove.sort()
1661 for f in remove:
1663 for f in remove:
1662 self.ui.note(_("removing %s\n") % f)
1664 self.ui.note(_("removing %s\n") % f)
1663 util.audit_path(f)
1665 util.audit_path(f)
1664 try:
1666 try:
1665 util.unlink(self.wjoin(f))
1667 util.unlink(self.wjoin(f))
1666 except OSError, inst:
1668 except OSError, inst:
1667 if inst.errno != errno.ENOENT:
1669 if inst.errno != errno.ENOENT:
1668 self.ui.warn(_("update failed to remove %s: %s!\n") %
1670 self.ui.warn(_("update failed to remove %s: %s!\n") %
1669 (f, inst.strerror))
1671 (f, inst.strerror))
1670 if moddirstate:
1672 if moddirstate:
1671 if branch_merge:
1673 if branch_merge:
1672 self.dirstate.update(remove, 'r')
1674 self.dirstate.update(remove, 'r')
1673 else:
1675 else:
1674 self.dirstate.forget(remove)
1676 self.dirstate.forget(remove)
1675
1677
1676 if moddirstate:
1678 if moddirstate:
1677 self.dirstate.setparents(p1, p2)
1679 self.dirstate.setparents(p1, p2)
1678 return err
1680 return err
1679
1681
1680 def merge3(self, fn, my, other):
1682 def merge3(self, fn, my, other, p1, p2):
1681 """perform a 3-way merge in the working directory"""
1683 """perform a 3-way merge in the working directory"""
1682
1684
1683 def temp(prefix, node):
1685 def temp(prefix, node):
1684 pre = "%s~%s." % (os.path.basename(fn), prefix)
1686 pre = "%s~%s." % (os.path.basename(fn), prefix)
1685 (fd, name) = tempfile.mkstemp("", pre)
1687 (fd, name) = tempfile.mkstemp("", pre)
1686 f = os.fdopen(fd, "wb")
1688 f = os.fdopen(fd, "wb")
1687 self.wwrite(fn, fl.read(node), f)
1689 self.wwrite(fn, fl.read(node), f)
1688 f.close()
1690 f.close()
1689 return name
1691 return name
1690
1692
1691 fl = self.file(fn)
1693 fl = self.file(fn)
1692 base = fl.ancestor(my, other)
1694 base = fl.ancestor(my, other)
1693 a = self.wjoin(fn)
1695 a = self.wjoin(fn)
1694 b = temp("base", base)
1696 b = temp("base", base)
1695 c = temp("other", other)
1697 c = temp("other", other)
1696
1698
1697 self.ui.note(_("resolving %s\n") % fn)
1699 self.ui.note(_("resolving %s\n") % fn)
1698 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1700 self.ui.debug(_("file %s: my %s other %s ancestor %s\n") %
1699 (fn, short(my), short(other), short(base)))
1701 (fn, short(my), short(other), short(base)))
1700
1702
1701 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1703 cmd = (os.environ.get("HGMERGE") or self.ui.config("ui", "merge")
1702 or "hgmerge")
1704 or "hgmerge")
1703 r = os.system('%s "%s" "%s" "%s"' % (cmd, a, b, c))
1705 r = util.system('%s "%s" "%s" "%s"' % (cmd, a, b, c),
1706 environ={'HG_ROOT': self.root,
1707 'HG_FILE': fn,
1708 'HG_MY_NODE': p1,
1709 'HG_OTHER_NODE': p2,
1710 'HG_FILE_MY_NODE': hex(my),
1711 'HG_FILE_OTHER_NODE': hex(other),
1712 'HG_FILE_BASE_NODE': hex(base)})
1704 if r:
1713 if r:
1705 self.ui.warn(_("merging %s failed!\n") % fn)
1714 self.ui.warn(_("merging %s failed!\n") % fn)
1706
1715
1707 os.unlink(b)
1716 os.unlink(b)
1708 os.unlink(c)
1717 os.unlink(c)
1709 return r
1718 return r
1710
1719
1711 def verify(self):
1720 def verify(self):
1712 filelinkrevs = {}
1721 filelinkrevs = {}
1713 filenodes = {}
1722 filenodes = {}
1714 changesets = revisions = files = 0
1723 changesets = revisions = files = 0
1715 errors = [0]
1724 errors = [0]
1716 neededmanifests = {}
1725 neededmanifests = {}
1717
1726
1718 def err(msg):
1727 def err(msg):
1719 self.ui.warn(msg + "\n")
1728 self.ui.warn(msg + "\n")
1720 errors[0] += 1
1729 errors[0] += 1
1721
1730
1722 def checksize(obj, name):
1731 def checksize(obj, name):
1723 d = obj.checksize()
1732 d = obj.checksize()
1724 if d[0]:
1733 if d[0]:
1725 err(_("%s data length off by %d bytes") % (name, d[0]))
1734 err(_("%s data length off by %d bytes") % (name, d[0]))
1726 if d[1]:
1735 if d[1]:
1727 err(_("%s index contains %d extra bytes") % (name, d[1]))
1736 err(_("%s index contains %d extra bytes") % (name, d[1]))
1728
1737
1729 seen = {}
1738 seen = {}
1730 self.ui.status(_("checking changesets\n"))
1739 self.ui.status(_("checking changesets\n"))
1731 checksize(self.changelog, "changelog")
1740 checksize(self.changelog, "changelog")
1732
1741
1733 for i in range(self.changelog.count()):
1742 for i in range(self.changelog.count()):
1734 changesets += 1
1743 changesets += 1
1735 n = self.changelog.node(i)
1744 n = self.changelog.node(i)
1736 l = self.changelog.linkrev(n)
1745 l = self.changelog.linkrev(n)
1737 if l != i:
1746 if l != i:
1738 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1747 err(_("incorrect link (%d) for changeset revision %d") %(l, i))
1739 if n in seen:
1748 if n in seen:
1740 err(_("duplicate changeset at revision %d") % i)
1749 err(_("duplicate changeset at revision %d") % i)
1741 seen[n] = 1
1750 seen[n] = 1
1742
1751
1743 for p in self.changelog.parents(n):
1752 for p in self.changelog.parents(n):
1744 if p not in self.changelog.nodemap:
1753 if p not in self.changelog.nodemap:
1745 err(_("changeset %s has unknown parent %s") %
1754 err(_("changeset %s has unknown parent %s") %
1746 (short(n), short(p)))
1755 (short(n), short(p)))
1747 try:
1756 try:
1748 changes = self.changelog.read(n)
1757 changes = self.changelog.read(n)
1749 except KeyboardInterrupt:
1758 except KeyboardInterrupt:
1750 self.ui.warn(_("interrupted"))
1759 self.ui.warn(_("interrupted"))
1751 raise
1760 raise
1752 except Exception, inst:
1761 except Exception, inst:
1753 err(_("unpacking changeset %s: %s") % (short(n), inst))
1762 err(_("unpacking changeset %s: %s") % (short(n), inst))
1754 continue
1763 continue
1755
1764
1756 neededmanifests[changes[0]] = n
1765 neededmanifests[changes[0]] = n
1757
1766
1758 for f in changes[3]:
1767 for f in changes[3]:
1759 filelinkrevs.setdefault(f, []).append(i)
1768 filelinkrevs.setdefault(f, []).append(i)
1760
1769
1761 seen = {}
1770 seen = {}
1762 self.ui.status(_("checking manifests\n"))
1771 self.ui.status(_("checking manifests\n"))
1763 checksize(self.manifest, "manifest")
1772 checksize(self.manifest, "manifest")
1764
1773
1765 for i in range(self.manifest.count()):
1774 for i in range(self.manifest.count()):
1766 n = self.manifest.node(i)
1775 n = self.manifest.node(i)
1767 l = self.manifest.linkrev(n)
1776 l = self.manifest.linkrev(n)
1768
1777
1769 if l < 0 or l >= self.changelog.count():
1778 if l < 0 or l >= self.changelog.count():
1770 err(_("bad manifest link (%d) at revision %d") % (l, i))
1779 err(_("bad manifest link (%d) at revision %d") % (l, i))
1771
1780
1772 if n in neededmanifests:
1781 if n in neededmanifests:
1773 del neededmanifests[n]
1782 del neededmanifests[n]
1774
1783
1775 if n in seen:
1784 if n in seen:
1776 err(_("duplicate manifest at revision %d") % i)
1785 err(_("duplicate manifest at revision %d") % i)
1777
1786
1778 seen[n] = 1
1787 seen[n] = 1
1779
1788
1780 for p in self.manifest.parents(n):
1789 for p in self.manifest.parents(n):
1781 if p not in self.manifest.nodemap:
1790 if p not in self.manifest.nodemap:
1782 err(_("manifest %s has unknown parent %s") %
1791 err(_("manifest %s has unknown parent %s") %
1783 (short(n), short(p)))
1792 (short(n), short(p)))
1784
1793
1785 try:
1794 try:
1786 delta = mdiff.patchtext(self.manifest.delta(n))
1795 delta = mdiff.patchtext(self.manifest.delta(n))
1787 except KeyboardInterrupt:
1796 except KeyboardInterrupt:
1788 self.ui.warn(_("interrupted"))
1797 self.ui.warn(_("interrupted"))
1789 raise
1798 raise
1790 except Exception, inst:
1799 except Exception, inst:
1791 err(_("unpacking manifest %s: %s") % (short(n), inst))
1800 err(_("unpacking manifest %s: %s") % (short(n), inst))
1792 continue
1801 continue
1793
1802
1794 try:
1803 try:
1795 ff = [ l.split('\0') for l in delta.splitlines() ]
1804 ff = [ l.split('\0') for l in delta.splitlines() ]
1796 for f, fn in ff:
1805 for f, fn in ff:
1797 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1806 filenodes.setdefault(f, {})[bin(fn[:40])] = 1
1798 except (ValueError, TypeError), inst:
1807 except (ValueError, TypeError), inst:
1799 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1808 err(_("broken delta in manifest %s: %s") % (short(n), inst))
1800
1809
1801 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1810 self.ui.status(_("crosschecking files in changesets and manifests\n"))
1802
1811
1803 for m, c in neededmanifests.items():
1812 for m, c in neededmanifests.items():
1804 err(_("Changeset %s refers to unknown manifest %s") %
1813 err(_("Changeset %s refers to unknown manifest %s") %
1805 (short(m), short(c)))
1814 (short(m), short(c)))
1806 del neededmanifests
1815 del neededmanifests
1807
1816
1808 for f in filenodes:
1817 for f in filenodes:
1809 if f not in filelinkrevs:
1818 if f not in filelinkrevs:
1810 err(_("file %s in manifest but not in changesets") % f)
1819 err(_("file %s in manifest but not in changesets") % f)
1811
1820
1812 for f in filelinkrevs:
1821 for f in filelinkrevs:
1813 if f not in filenodes:
1822 if f not in filenodes:
1814 err(_("file %s in changeset but not in manifest") % f)
1823 err(_("file %s in changeset but not in manifest") % f)
1815
1824
1816 self.ui.status(_("checking files\n"))
1825 self.ui.status(_("checking files\n"))
1817 ff = filenodes.keys()
1826 ff = filenodes.keys()
1818 ff.sort()
1827 ff.sort()
1819 for f in ff:
1828 for f in ff:
1820 if f == "/dev/null":
1829 if f == "/dev/null":
1821 continue
1830 continue
1822 files += 1
1831 files += 1
1823 if not f:
1832 if not f:
1824 err(_("file without name in manifest %s") % short(n))
1833 err(_("file without name in manifest %s") % short(n))
1825 continue
1834 continue
1826 fl = self.file(f)
1835 fl = self.file(f)
1827 checksize(fl, f)
1836 checksize(fl, f)
1828
1837
1829 nodes = {nullid: 1}
1838 nodes = {nullid: 1}
1830 seen = {}
1839 seen = {}
1831 for i in range(fl.count()):
1840 for i in range(fl.count()):
1832 revisions += 1
1841 revisions += 1
1833 n = fl.node(i)
1842 n = fl.node(i)
1834
1843
1835 if n in seen:
1844 if n in seen:
1836 err(_("%s: duplicate revision %d") % (f, i))
1845 err(_("%s: duplicate revision %d") % (f, i))
1837 if n not in filenodes[f]:
1846 if n not in filenodes[f]:
1838 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1847 err(_("%s: %d:%s not in manifests") % (f, i, short(n)))
1839 else:
1848 else:
1840 del filenodes[f][n]
1849 del filenodes[f][n]
1841
1850
1842 flr = fl.linkrev(n)
1851 flr = fl.linkrev(n)
1843 if flr not in filelinkrevs.get(f, []):
1852 if flr not in filelinkrevs.get(f, []):
1844 err(_("%s:%s points to unexpected changeset %d")
1853 err(_("%s:%s points to unexpected changeset %d")
1845 % (f, short(n), flr))
1854 % (f, short(n), flr))
1846 else:
1855 else:
1847 filelinkrevs[f].remove(flr)
1856 filelinkrevs[f].remove(flr)
1848
1857
1849 # verify contents
1858 # verify contents
1850 try:
1859 try:
1851 t = fl.read(n)
1860 t = fl.read(n)
1852 except KeyboardInterrupt:
1861 except KeyboardInterrupt:
1853 self.ui.warn(_("interrupted"))
1862 self.ui.warn(_("interrupted"))
1854 raise
1863 raise
1855 except Exception, inst:
1864 except Exception, inst:
1856 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1865 err(_("unpacking file %s %s: %s") % (f, short(n), inst))
1857
1866
1858 # verify parents
1867 # verify parents
1859 (p1, p2) = fl.parents(n)
1868 (p1, p2) = fl.parents(n)
1860 if p1 not in nodes:
1869 if p1 not in nodes:
1861 err(_("file %s:%s unknown parent 1 %s") %
1870 err(_("file %s:%s unknown parent 1 %s") %
1862 (f, short(n), short(p1)))
1871 (f, short(n), short(p1)))
1863 if p2 not in nodes:
1872 if p2 not in nodes:
1864 err(_("file %s:%s unknown parent 2 %s") %
1873 err(_("file %s:%s unknown parent 2 %s") %
1865 (f, short(n), short(p1)))
1874 (f, short(n), short(p1)))
1866 nodes[n] = 1
1875 nodes[n] = 1
1867
1876
1868 # cross-check
1877 # cross-check
1869 for node in filenodes[f]:
1878 for node in filenodes[f]:
1870 err(_("node %s in manifests not in %s") % (hex(node), f))
1879 err(_("node %s in manifests not in %s") % (hex(node), f))
1871
1880
1872 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1881 self.ui.status(_("%d files, %d changesets, %d total revisions\n") %
1873 (files, changesets, revisions))
1882 (files, changesets, revisions))
1874
1883
1875 if errors[0]:
1884 if errors[0]:
1876 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1885 self.ui.warn(_("%d integrity errors encountered!\n") % errors[0])
1877 return 1
1886 return 1
1878
1887
1879 # used to avoid circular references so destructors work
1888 # used to avoid circular references so destructors work
1880 def aftertrans(base):
1889 def aftertrans(base):
1881 p = base
1890 p = base
1882 def a():
1891 def a():
1883 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1892 util.rename(os.path.join(p, "journal"), os.path.join(p, "undo"))
1884 util.rename(os.path.join(p, "journal.dirstate"),
1893 util.rename(os.path.join(p, "journal.dirstate"),
1885 os.path.join(p, "undo.dirstate"))
1894 os.path.join(p, "undo.dirstate"))
1886 return a
1895 return a
1887
1896
General Comments 0
You need to be logged in to leave comments. Login now